-
-
Notifications
You must be signed in to change notification settings - Fork 34.6k
gh-108337: Add pyatomic.h header #108338
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
gh-108337: Add pyatomic.h header #108338
Changes from 9 commits
2bc82a6
9adf4f8
d70e8ae
927430d
2d6f950
bf27448
60b56f1
0131868
0474e2f
462c20a
3078328
4daf1a2
f932c77
e720736
ee6e49f
ca8c3b3
2568ad9
2d08290
71d981e
457ce21
9dd0f0b
7611965
a8e2538
433319f
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,397 @@ | ||
| #ifndef Py_ATOMIC_H | ||
| #define Py_ATOMIC_H | ||
|
|
||
| // This header provides cross-platform low-level atomic operations | ||
| // similar to C11 atomics. | ||
| // | ||
| // Operations are sequentially consistent unless they have a suffix indicating | ||
| // otherwise. If in doubt, prefer the sequentially consistent operations. | ||
| // | ||
| // The "_relaxed" suffix for load and store operations indicates the "relaxed" | ||
| // memory order. They don't provide synchronization, but (roughly speaking) | ||
| // guarantee somewhat sane behavior for races instead of undefined behavior. | ||
| // In practice, they correspond to "normal" hardware load and store instructions, | ||
| // so they are almost as inexpensive as plain loads and stores in C. | ||
| // | ||
| // Note that atomic read-modify-write operations like _Py_atomic_add_* return | ||
| // the previous value of the atomic variable, not the new value. | ||
| // | ||
| // See https://en.cppreference.com/w/c/atomic for more information on C11 atomics. | ||
| // See https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p2055r0.pdf | ||
| // "A Relaxed Guide to memory_order_relaxed" for discussion of and common usage | ||
| // of relaxed atomics. | ||
|
|
||
| // Atomically adds `value` to `address` and returns the previous value | ||
| static inline int | ||
| _Py_atomic_add_int(int *address, int value); | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. The glib library uses atomic for the first argument. Maybe it's a better name than address or pointer, what do you think?
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I slightly prefer address or ptr rather than atomic
Member
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
I dislike the For add operation, the C11 API uses arg for the second parameter name. I'm fine with value.
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I don't think If you dislike
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. For context, the GCC documentation for built-in atomic functions uses https://gcc.gnu.org/onlinedocs/gcc/_005f_005fatomic-Builtins.html |
||
|
|
||
| static inline int8_t | ||
| _Py_atomic_add_int8(int8_t *address, int8_t value); | ||
|
|
||
| static inline int16_t | ||
| _Py_atomic_add_int16(int16_t *address, int16_t value); | ||
|
|
||
| static inline int32_t | ||
| _Py_atomic_add_int32(int32_t *address, int32_t value); | ||
|
|
||
| static inline int64_t | ||
| _Py_atomic_add_int64(int64_t *address, int64_t value); | ||
|
|
||
| static inline intptr_t | ||
| _Py_atomic_add_intptr(intptr_t *address, intptr_t value); | ||
|
|
||
| static inline unsigned int | ||
| _Py_atomic_add_uint(unsigned int *address, unsigned int value); | ||
|
|
||
| static inline uint8_t | ||
| _Py_atomic_add_uint8(uint8_t *address, uint8_t value); | ||
|
|
||
| static inline uint16_t | ||
| _Py_atomic_add_uint16(uint16_t *address, uint16_t value); | ||
|
|
||
| static inline uint32_t | ||
| _Py_atomic_add_uint32(uint32_t *address, uint32_t value); | ||
|
|
||
| static inline uint64_t | ||
| _Py_atomic_add_uint64(uint64_t *address, uint64_t value); | ||
|
|
||
| static inline uintptr_t | ||
| _Py_atomic_add_uintptr(uintptr_t *address, uintptr_t value); | ||
|
|
||
| static inline Py_ssize_t | ||
| _Py_atomic_add_ssize(Py_ssize_t *address, Py_ssize_t value); | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Not sure it's worth exposing atomic ops for all int sizes and signednesses?
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I expect to use at least one atomic operation on each of the data types here (but not every atomic op on every data type). I tried to be consistent on what's defined because it makes understanding what's available easier and testing easier. |
||
|
|
||
| // Performs an atomic compare-and-exchange. If `*address` and `expected` are equal, | ||
| // then `value` is stored in `*address`. Returns 1 on success and 0 on failure. | ||
| // These correspond to the "strong" variations of the C11 atomic_compare_exchange_* functions. | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. C11 passes
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. The motivation was two-fold: First, many of the _Py_atomic_compare_exchange calls in the nogil fork use constants as vs. Second, I find this style (no pointer for expected) to be a bit less error-prone. I've been tripped up once or twice by having I don't feel terribly strongly about this, so if there is a general preference for sticking closer to the C11-style API here, I can change it.
Member
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. AFAIU the main motivation for the C11 style APIs is for retry loops in lockless data structure implementations. A simplistic example (this may be embarassingly wrong): struct ListNode;
typedef struct ListNode {
int value;
struct ListNode* next;
} ListNode;
void ListAppend(ListNode* list, int new_value) {
ListNode* new_node = (ListNode*) malloc(sizeof(ListNode));
new_node->value = new_value;
new_node->next = NULL;
ListNode* expected = NULL;
while (!_Py_atomic_compare_exchange_ptr(&list->next, &expected, new_node)) {
list = expected;
expected = NULL;
}
}
Member
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. If the second argument is a reference ( For C11 atomic_exchange(), the second argument is not a pointer, but a value (integer), no? https://en.cppreference.com/w/c/atomic/atomic_exchange C11 atomic_compare_exchange_strong() and atomic_compare_exchange_weak() use a pointer for expected. But this API writes into
if (memcmp(obj, expected, sizeof *obj) == 0) {
memcpy(obj, &desired, sizeof *obj);
return true;
} else {
memcpy(expected, obj, sizeof *obj);
return false;
}For this header file, I would prefer to not have two flavors, the API is already quite long! I would prefer to have a single flavor. If there is a use case where setting expected is relevant, I suggest using a pointer for the second argument. In short, I agree to change the API to use a pointer. The obj, expected and desired names come from the C11 API.
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I'll make the second argument a pointer like the C11 API. |
||
| static inline int | ||
| _Py_atomic_compare_exchange_int(int *address, int expected, int value); | ||
|
|
||
| static inline int | ||
| _Py_atomic_compare_exchange_int8(int8_t *address, int8_t expected, int8_t value); | ||
|
|
||
| static inline int | ||
| _Py_atomic_compare_exchange_int16(int16_t *address, int16_t expected, int16_t value); | ||
|
|
||
| static inline int | ||
| _Py_atomic_compare_exchange_int32(int32_t *address, int32_t expected, int32_t value); | ||
|
|
||
| static inline int | ||
| _Py_atomic_compare_exchange_int64(int64_t *address, int64_t expected, int64_t value); | ||
|
|
||
| static inline int | ||
| _Py_atomic_compare_exchange_intptr(intptr_t *address, intptr_t expected, intptr_t value); | ||
|
|
||
| static inline int | ||
| _Py_atomic_compare_exchange_uint(unsigned int *address, unsigned int expected, unsigned int value); | ||
|
|
||
| static inline int | ||
| _Py_atomic_compare_exchange_uint8(uint8_t *address, uint8_t expected, uint8_t value); | ||
|
|
||
| static inline int | ||
| _Py_atomic_compare_exchange_uint16(uint16_t *address, uint16_t expected, uint16_t value); | ||
|
|
||
| static inline int | ||
| _Py_atomic_compare_exchange_uint32(uint32_t *address, uint32_t expected, uint32_t value); | ||
|
|
||
| static inline int | ||
| _Py_atomic_compare_exchange_uint64(uint64_t *address, uint64_t expected, uint64_t value); | ||
|
|
||
| static inline int | ||
| _Py_atomic_compare_exchange_uintptr(uintptr_t *address, uintptr_t expected, uintptr_t value); | ||
|
|
||
| static inline int | ||
| _Py_atomic_compare_exchange_ssize(Py_ssize_t *address, Py_ssize_t expected, Py_ssize_t value); | ||
|
|
||
| static inline int | ||
| _Py_atomic_compare_exchange_ptr(void *address, void *expected, void *value); | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Should it be
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. The problem is that but if
Member
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. A large part of the Python C API uses macro to convert arguments to
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I don't see the benefit of that style over the current approach, and it would silently allow passing some integer types to
Member
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Right, in that case I'm fine with the surprising |
||
|
|
||
| // Atomically replaces `*address` with `value` and returns the previous value of `*address`. | ||
| static inline int | ||
| _Py_atomic_exchange_int(int *address, int value); | ||
|
|
||
| static inline int8_t | ||
| _Py_atomic_exchange_int8(int8_t *address, int8_t value); | ||
|
|
||
| static inline int16_t | ||
| _Py_atomic_exchange_int16(int16_t *address, int16_t value); | ||
|
|
||
| static inline int32_t | ||
| _Py_atomic_exchange_int32(int32_t *address, int32_t value); | ||
|
|
||
| static inline int64_t | ||
| _Py_atomic_exchange_int64(int64_t *address, int64_t value); | ||
|
|
||
| static inline intptr_t | ||
| _Py_atomic_exchange_intptr(intptr_t *address, intptr_t value); | ||
|
|
||
| static inline unsigned int | ||
| _Py_atomic_exchange_uint(unsigned int *address, unsigned int value); | ||
|
|
||
| static inline uint8_t | ||
| _Py_atomic_exchange_uint8(uint8_t *address, uint8_t value); | ||
|
|
||
| static inline uint16_t | ||
| _Py_atomic_exchange_uint16(uint16_t *address, uint16_t value); | ||
|
|
||
| static inline uint32_t | ||
| _Py_atomic_exchange_uint32(uint32_t *address, uint32_t value); | ||
|
|
||
| static inline uint64_t | ||
| _Py_atomic_exchange_uint64(uint64_t *address, uint64_t value); | ||
|
|
||
| static inline uintptr_t | ||
| _Py_atomic_exchange_uintptr(uintptr_t *address, uintptr_t value); | ||
|
|
||
| static inline Py_ssize_t | ||
| _Py_atomic_exchange_ssize(Py_ssize_t *address, Py_ssize_t value); | ||
|
|
||
| static inline void * | ||
| _Py_atomic_exchange_ptr(void *address, void *value); | ||
|
|
||
| // Performs `*address &= value` atomically and returns the previous value of `*address`. | ||
| static inline uint8_t | ||
| _Py_atomic_and_uint8(uint8_t *address, uint8_t value); | ||
|
|
||
| static inline uint16_t | ||
| _Py_atomic_and_uint16(uint16_t *address, uint16_t value); | ||
|
|
||
| static inline uint32_t | ||
| _Py_atomic_and_uint32(uint32_t *address, uint32_t value); | ||
|
|
||
| static inline uint64_t | ||
| _Py_atomic_and_uint64(uint64_t *address, uint64_t value); | ||
|
|
||
| static inline uintptr_t | ||
| _Py_atomic_and_uintptr(uintptr_t *address, uintptr_t value); | ||
|
|
||
| // Performs `*address |= value` atomically and returns the previous value of `*address`. | ||
| static inline uint8_t | ||
| _Py_atomic_or_uint8(uint8_t *address, uint8_t value); | ||
|
|
||
| static inline uint16_t | ||
| _Py_atomic_or_uint16(uint16_t *address, uint16_t value); | ||
|
|
||
| static inline uint32_t | ||
| _Py_atomic_or_uint32(uint32_t *address, uint32_t value); | ||
|
|
||
| static inline uint64_t | ||
| _Py_atomic_or_uint64(uint64_t *address, uint64_t value); | ||
|
|
||
| static inline uintptr_t | ||
| _Py_atomic_or_uintptr(uintptr_t *address, uintptr_t value); | ||
|
|
||
| // Atomically loads `*address` (sequential consistency) | ||
| static inline int | ||
| _Py_atomic_load_int(const int *address); | ||
|
|
||
| static inline int8_t | ||
| _Py_atomic_load_int8(const int8_t *address); | ||
|
|
||
| static inline int16_t | ||
| _Py_atomic_load_int16(const int16_t *address); | ||
|
|
||
| static inline int32_t | ||
| _Py_atomic_load_int32(const int32_t *address); | ||
|
|
||
| static inline int64_t | ||
| _Py_atomic_load_int64(const int64_t *address); | ||
|
|
||
| static inline intptr_t | ||
| _Py_atomic_load_intptr(const intptr_t *address); | ||
|
|
||
| static inline uint8_t | ||
| _Py_atomic_load_uint8(const uint8_t *address); | ||
|
|
||
| static inline uint16_t | ||
| _Py_atomic_load_uint16(const uint16_t *address); | ||
|
|
||
| static inline uint32_t | ||
| _Py_atomic_load_uint32(const uint32_t *address); | ||
|
|
||
| static inline uint64_t | ||
| _Py_atomic_load_uint64(const uint64_t *address); | ||
|
|
||
| static inline uintptr_t | ||
| _Py_atomic_load_uintptr(const uintptr_t *address); | ||
|
|
||
| static inline unsigned int | ||
| _Py_atomic_load_uint(const unsigned int *address); | ||
|
|
||
| static inline Py_ssize_t | ||
| _Py_atomic_load_ssize(const Py_ssize_t *address); | ||
|
|
||
| static inline void * | ||
| _Py_atomic_load_ptr(const void *address); | ||
|
|
||
| // Loads `*address` (relaxed consistency, i.e., no ordering) | ||
| static inline int | ||
| _Py_atomic_load_int_relaxed(const int *address); | ||
|
|
||
| static inline int8_t | ||
| _Py_atomic_load_int8_relaxed(const int8_t *address); | ||
|
|
||
| static inline int16_t | ||
| _Py_atomic_load_int16_relaxed(const int16_t *address); | ||
|
|
||
| static inline int32_t | ||
| _Py_atomic_load_int32_relaxed(const int32_t *address); | ||
|
|
||
| static inline int64_t | ||
| _Py_atomic_load_int64_relaxed(const int64_t *address); | ||
|
|
||
| static inline intptr_t | ||
| _Py_atomic_load_intptr_relaxed(const intptr_t *address); | ||
|
|
||
| static inline uint8_t | ||
| _Py_atomic_load_uint8_relaxed(const uint8_t *address); | ||
|
|
||
| static inline uint16_t | ||
| _Py_atomic_load_uint16_relaxed(const uint16_t *address); | ||
|
|
||
| static inline uint32_t | ||
| _Py_atomic_load_uint32_relaxed(const uint32_t *address); | ||
|
|
||
| static inline uint64_t | ||
| _Py_atomic_load_uint64_relaxed(const uint64_t *address); | ||
|
|
||
| static inline uintptr_t | ||
| _Py_atomic_load_uintptr_relaxed(const uintptr_t *address); | ||
|
|
||
| static inline unsigned int | ||
| _Py_atomic_load_uint_relaxed(const unsigned int *address); | ||
|
|
||
| static inline Py_ssize_t | ||
| _Py_atomic_load_ssize_relaxed(const Py_ssize_t *address); | ||
|
|
||
| static inline void * | ||
| _Py_atomic_load_ptr_relaxed(const void *address); | ||
|
|
||
| // Atomically performs `*address = value` (sequential consistency) | ||
| static inline void | ||
| _Py_atomic_store_int(int *address, int value); | ||
|
|
||
| static inline void | ||
| _Py_atomic_store_int8(int8_t *address, int8_t value); | ||
|
|
||
| static inline void | ||
| _Py_atomic_store_int16(int16_t *address, int16_t value); | ||
|
|
||
| static inline void | ||
| _Py_atomic_store_int32(int32_t *address, int32_t value); | ||
|
|
||
| static inline void | ||
| _Py_atomic_store_int64(int64_t *address, int64_t value); | ||
|
|
||
| static inline void | ||
| _Py_atomic_store_intptr(intptr_t *address, intptr_t value); | ||
|
|
||
| static inline void | ||
| _Py_atomic_store_uint8(uint8_t *address, uint8_t value); | ||
|
|
||
| static inline void | ||
| _Py_atomic_store_uint16(uint16_t *address, uint16_t value); | ||
|
|
||
| static inline void | ||
| _Py_atomic_store_uint32(uint32_t *address, uint32_t value); | ||
|
|
||
| static inline void | ||
| _Py_atomic_store_uint64(uint64_t *address, uint64_t value); | ||
|
|
||
| static inline void | ||
| _Py_atomic_store_uintptr(uintptr_t *address, uintptr_t value); | ||
|
|
||
| static inline void | ||
| _Py_atomic_store_uint(unsigned int *address, unsigned int value); | ||
|
|
||
| static inline void | ||
| _Py_atomic_store_ptr(void *address, void *value); | ||
|
|
||
| static inline void | ||
| _Py_atomic_store_ssize(Py_ssize_t* address, Py_ssize_t value); | ||
|
|
||
| // Stores `*address = value` (relaxed consistency, i.e., no ordering) | ||
| static inline void | ||
| _Py_atomic_store_int_relaxed(int *address, int value); | ||
|
|
||
| static inline void | ||
| _Py_atomic_store_int8_relaxed(int8_t *address, int8_t value); | ||
|
|
||
| static inline void | ||
| _Py_atomic_store_int16_relaxed(int16_t *address, int16_t value); | ||
|
|
||
| static inline void | ||
| _Py_atomic_store_int32_relaxed(int32_t *address, int32_t value); | ||
|
|
||
| static inline void | ||
| _Py_atomic_store_int64_relaxed(int64_t *address, int64_t value); | ||
|
|
||
| static inline void | ||
| _Py_atomic_store_intptr_relaxed(intptr_t *address, intptr_t value); | ||
|
|
||
| static inline void | ||
| _Py_atomic_store_uint8_relaxed(uint8_t* address, uint8_t value); | ||
|
|
||
| static inline void | ||
| _Py_atomic_store_uint16_relaxed(uint16_t *address, uint16_t value); | ||
|
|
||
| static inline void | ||
| _Py_atomic_store_uint32_relaxed(uint32_t *address, uint32_t value); | ||
|
|
||
| static inline void | ||
| _Py_atomic_store_uint64_relaxed(uint64_t *address, uint64_t value); | ||
|
|
||
| static inline void | ||
| _Py_atomic_store_uintptr_relaxed(uintptr_t *address, uintptr_t value); | ||
|
|
||
| static inline void | ||
| _Py_atomic_store_uint_relaxed(unsigned int *address, unsigned int value); | ||
|
|
||
| static inline void | ||
| _Py_atomic_store_ptr_relaxed(void *address, void *value); | ||
|
|
||
| static inline void | ||
| _Py_atomic_store_ssize_relaxed(Py_ssize_t *address, Py_ssize_t value); | ||
|
|
||
| // Stores `*address = value` (release operation) | ||
| static inline void | ||
| _Py_atomic_store_uint64_release(uint64_t *address, uint64_t value); | ||
|
|
||
| static inline void | ||
| _Py_atomic_store_ptr_release(void *address, void *value); | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I'm not an expert, but why is it useful to expose "release" operations if no "acquire" operations are exposed?
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. You can always use stronger orderings for correctness (i.e., "seq_cst" everywhere instead of "acquire"). From a performance view, "release" is substantially faster than "seq_cst" stores on x86/x86-64, but "acquire" generates the same code as "seq_cst" loads on both x86/x86-64 and aarch64.
Member
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I don't think CPython support is limited to x86 and ARM variants, so the set of atomic ops exposed should probably be made consistent nevertheless? Also, using "seq_cst" in combination with "release" will probably make the code more difficult to reason about, than if "acquire" is exposed (and memory ordering is already hard to reason about!).
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I'll add a |
||
|
|
||
|
|
||
| // Sequential consistency fence | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I would prefer a more elaborated documentation, "fence" is kind of weak.
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. The challenge I have is that it's really hard to describe what fences do in a way that's helpful and accurate. The above documentation is too strong for C11 fences. There's https://en.cppreference.com/w/c/atomic/atomic_thread_fence, but I find it vague. And the C++ documentation (https://en.cppreference.com/w/cpp/atomic/atomic_thread_fence) is more detailed but really hard to understand.
Member
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Please add one or two of these links here. It's ok to have references to external doc, it's better than no doc :-) |
||
| static inline void | ||
| _Py_atomic_fence_seq_cst(void); | ||
|
|
||
| // Release fence | ||
| static inline void | ||
| _Py_atomic_fence_release(void); | ||
|
colesbury marked this conversation as resolved.
Outdated
|
||
|
|
||
|
|
||
| #ifndef _Py_USE_GCC_BUILTIN_ATOMICS | ||
| #if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)) | ||
| #define _Py_USE_GCC_BUILTIN_ATOMICS 1 | ||
| #elif defined(__clang__) | ||
| #if __has_builtin(__atomic_load) | ||
| #define _Py_USE_GCC_BUILTIN_ATOMICS 1 | ||
| #endif | ||
| #endif | ||
| #endif | ||
|
colesbury marked this conversation as resolved.
|
||
|
|
||
| #if _Py_USE_GCC_BUILTIN_ATOMICS | ||
| #define Py_ATOMIC_GCC_H | ||
| #include "cpython/pyatomic_gcc.h" | ||
| #elif __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__) | ||
| #define Py_ATOMIC_STD_H | ||
| #include "cpython/pyatomic_std.h" | ||
| #elif defined(_MSC_VER) | ||
| #define Py_ATOMIC_MSC_H | ||
| #include "cpython/pyatomic_msc.h" | ||
| #else | ||
| #error "define pyatomic for this platform" | ||
| #endif | ||
|
colesbury marked this conversation as resolved.
|
||
|
|
||
| #endif /* Py_ATOMIC_H */ | ||
|
|
||
Uh oh!
There was an error while loading. Please reload this page.