@@ -396,83 +396,105 @@ static void uv__rwlock_srwlock_wrunlock(uv_rwlock_t* rwlock) {
396
396
397
397
398
398
static int uv__rwlock_fallback_init(uv_rwlock_t* rwlock) {
  /* A binary semaphore (count 1, max 1) serves as the write lock; a
   * semaphore is used instead of a mutex so any thread may release it,
   * not just the one that acquired it.
   */
  HANDLE sem = CreateSemaphoreW(NULL, 1, 1, NULL);
  if (sem == NULL)
    return uv_translate_sys_error(GetLastError());

  rwlock->fallback_.write_lock_.sem = sem;

  /* The critical section guards the reader counter below. */
  InitializeCriticalSection(&rwlock->fallback_.read_lock_.cs);
  rwlock->fallback_.num_readers_ = 0;

  return 0;
}
415
413
416
414
417
415
static void uv__rwlock_fallback_destroy(uv_rwlock_t* rwlock) {
  /* Release both resources created by uv__rwlock_fallback_init; the two
   * teardown calls are independent of each other.
   */
  CloseHandle(rwlock->fallback_.write_lock_.sem);
  DeleteCriticalSection(&rwlock->fallback_.read_lock_.cs);
}
421
419
422
420
423
421
static void uv__rwlock_fallback_rdlock(uv_rwlock_t* rwlock) {
  DWORD r;

  /* Serialize updates to the reader counter. */
  EnterCriticalSection(&rwlock->fallback_.read_lock_.cs);

  /* The first reader in takes the write semaphore so that writers stay
   * blocked until the last reader leaves.
   */
  if (++rwlock->fallback_.num_readers_ == 1) {
    r = WaitForSingleObject(rwlock->fallback_.write_lock_.sem, INFINITE);
    if (r != WAIT_OBJECT_0)
      uv_fatal_error(GetLastError(), "WaitForSingleObject");
  }

  LeaveCriticalSection(&rwlock->fallback_.read_lock_.cs);
}
431
437
432
438
433
439
static int uv__rwlock_fallback_tryrdlock(uv_rwlock_t* rwlock) {
  int err;

  /* Non-blocking attempt to enter the reader-count guard. */
  if (!TryEnterCriticalSection(&rwlock->fallback_.read_lock_.cs))
    return UV_EAGAIN;

  err = 0;

  if (rwlock->fallback_.num_readers_ == 0) {
    /* No readers yet: the write semaphore must be acquired first. Poll it
     * with a zero timeout so the call never blocks.
     */
    DWORD r = WaitForSingleObject(rwlock->fallback_.write_lock_.sem, 0);
    if (r == WAIT_OBJECT_0)
      rwlock->fallback_.num_readers_++;
    else if (r == WAIT_TIMEOUT)
      err = UV_EAGAIN;
    else if (r == WAIT_FAILED)
      err = uv_translate_sys_error(GetLastError());
    else
      err = UV_EIO;
  } else {
    /* BUGFIX: the write semaphore is already held because other readers
     * are active, but this reader must still be counted. Without this
     * increment a successful tryrdlock would later make rdunlock
     * underflow the counter and release the write lock while readers
     * remain.
     */
    rwlock->fallback_.num_readers_++;
  }

  LeaveCriticalSection(&rwlock->fallback_.read_lock_.cs);
  return err;
}
452
461
453
462
454
463
static void uv__rwlock_fallback_rdunlock(uv_rwlock_t* rwlock) {
  /* Serialize updates to the reader counter. */
  EnterCriticalSection(&rwlock->fallback_.read_lock_.cs);

  /* The last reader out hands the write semaphore back to writers. */
  if (--rwlock->fallback_.num_readers_ == 0 &&
      !ReleaseSemaphore(rwlock->fallback_.write_lock_.sem, 1, NULL))
    uv_fatal_error(GetLastError(), "ReleaseSemaphore");

  LeaveCriticalSection(&rwlock->fallback_.read_lock_.cs);
}
462
473
463
474
464
475
static void uv__rwlock_fallback_wrlock(uv_rwlock_t* rwlock) {
  /* Block until the write semaphore's single permit is available. */
  if (WaitForSingleObject(rwlock->fallback_.write_lock_.sem, INFINITE) !=
      WAIT_OBJECT_0)
    uv_fatal_error(GetLastError(), "WaitForSingleObject");
}
467
480
468
481
469
482
static int uv__rwlock_fallback_trywrlock(uv_rwlock_t* rwlock) {
  /* Poll the write semaphore with a zero timeout so the call never blocks. */
  switch (WaitForSingleObject(rwlock->fallback_.write_lock_.sem, 0)) {
    case WAIT_OBJECT_0:
      return 0;                                     /* Lock acquired. */
    case WAIT_TIMEOUT:
      return UV_EAGAIN;                             /* Held by someone else. */
    case WAIT_FAILED:
      return uv_translate_sys_error(GetLastError());
    default:
      return UV_EIO;                                /* Unexpected status. */
  }
}
472
493
473
494
474
495
static void uv__rwlock_fallback_wrunlock(uv_rwlock_t* rwlock) {
  /* Return the single permit to the write semaphore, waking one waiter. */
  if (!ReleaseSemaphore(rwlock->fallback_.write_lock_.sem, 1, NULL))
    uv_fatal_error(GetLastError(), "ReleaseSemaphore");
}
477
499
478
500
0 commit comments