|
41 | 41 | #include <gnu/libc-version.h> /* gnu_get_libc_version() */ |
42 | 42 | #endif |
43 | 43 |
|
| 44 | +#if defined(__linux__) |
| 45 | +# include <sched.h> |
| 46 | +# define uv__cpu_set_t cpu_set_t |
| 47 | +#elif defined(__FreeBSD__) |
| 48 | +# include <sys/param.h> |
| 49 | +# include <sys/cpuset.h> |
| 50 | +# include <pthread_np.h> |
| 51 | +# define uv__cpu_set_t cpuset_t |
| 52 | +#endif |
| 53 | + |
| 54 | + |
44 | 55 | #undef NANOSEC |
45 | 56 | #define NANOSEC ((uint64_t) 1e9) |
46 | 57 |
|
@@ -284,6 +295,106 @@ int uv_thread_create_ex(uv_thread_t* tid, |
284 | 295 | return UV__ERR(err); |
285 | 296 | } |
286 | 297 |
|
| 298 | +#if defined(__linux__) || defined(__FreeBSD__) |
| 299 | + |
| 300 | +int uv_thread_setaffinity(uv_thread_t* tid, |
| 301 | + char* cpumask, |
| 302 | + char* oldmask, |
| 303 | + size_t mask_size) { |
| 304 | + int i; |
| 305 | + int r; |
| 306 | + uv__cpu_set_t cpuset; |
| 307 | + int cpumasksize; |
| 308 | + |
| 309 | + cpumasksize = uv_cpumask_size(); |
| 310 | + if (cpumasksize < 0) |
| 311 | + return cpumasksize; |
| 312 | + if (mask_size < (size_t)cpumasksize) |
| 313 | + return UV_EINVAL; |
| 314 | + |
| 315 | + if (oldmask != NULL) { |
| 316 | + r = uv_thread_getaffinity(tid, oldmask, mask_size); |
| 317 | + if (r < 0) |
| 318 | + return r; |
| 319 | + } |
| 320 | + |
| 321 | + CPU_ZERO(&cpuset); |
| 322 | + for (i = 0; i < cpumasksize; i++) |
| 323 | + if (cpumask[i]) |
| 324 | + CPU_SET(i, &cpuset); |
| 325 | + |
| 326 | +#if defined(__ANDROID__) |
| 327 | + if (sched_setaffinity(pthread_gettid_np(*tid), sizeof(cpuset), &cpuset)) |
| 328 | + r = errno; |
| 329 | + else |
| 330 | + r = 0; |
| 331 | +#else |
| 332 | + r = pthread_setaffinity_np(*tid, sizeof(cpuset), &cpuset); |
| 333 | +#endif |
| 334 | + |
| 335 | + return UV__ERR(r); |
| 336 | +} |
| 337 | + |
| 338 | + |
| 339 | +int uv_thread_getaffinity(uv_thread_t* tid, |
| 340 | + char* cpumask, |
| 341 | + size_t mask_size) { |
| 342 | + int r; |
| 343 | + int i; |
| 344 | + uv__cpu_set_t cpuset; |
| 345 | + int cpumasksize; |
| 346 | + |
| 347 | + cpumasksize = uv_cpumask_size(); |
| 348 | + if (cpumasksize < 0) |
| 349 | + return cpumasksize; |
| 350 | + if (mask_size < (size_t)cpumasksize) |
| 351 | + return UV_EINVAL; |
| 352 | + |
| 353 | + CPU_ZERO(&cpuset); |
| 354 | +#if defined(__ANDROID__) |
| 355 | + if (sched_getaffinity(pthread_gettid_np(*tid), sizeof(cpuset), &cpuset)) |
| 356 | + r = errno; |
| 357 | + else |
| 358 | + r = 0; |
| 359 | +#else |
| 360 | + r = pthread_getaffinity_np(*tid, sizeof(cpuset), &cpuset); |
| 361 | +#endif |
| 362 | + if (r) |
| 363 | + return UV__ERR(r); |
| 364 | + for (i = 0; i < cpumasksize; i++) |
| 365 | + cpumask[i] = !!CPU_ISSET(i, &cpuset); |
| 366 | + |
| 367 | + return 0; |
| 368 | +} |
#else
/* Thread affinity is only implemented for Linux and FreeBSD above; on every
 * other platform these entry points are stubs that report lack of support.
 */
int uv_thread_setaffinity(uv_thread_t* tid,
                          char* cpumask,
                          char* oldmask,
                          size_t mask_size) {
  return UV_ENOTSUP;
}


/* Stub: querying thread affinity is not supported on this platform. */
int uv_thread_getaffinity(uv_thread_t* tid,
                          char* cpumask,
                          size_t mask_size) {
  return UV_ENOTSUP;
}
#endif /* defined(__linux__) || defined(__FreeBSD__) */
| 384 | + |
/* Return the id of the CPU the calling thread is currently running on, or a
 * libuv error code (UV_ENOTSUP on platforms without sched_getcpu()).
 */
int uv_thread_getcpu(void) {
#if defined(__linux__) || defined(__FreeBSD__)
  int cpu_id;

  cpu_id = sched_getcpu();
  if (cpu_id < 0)
    return UV__ERR(errno);

  return cpu_id;
#else
  return UV_ENOTSUP;
#endif
}
287 | 398 |
|
288 | 399 | uv_thread_t uv_thread_self(void) { |
289 | 400 | return pthread_self(); |
|
0 commit comments