@@ -482,28 +482,17 @@ pub mod context {
     mod v1_support {
         use super::*;

-        use atomic::{Atomic, Ordering};
-
         #[cfg(all(feature = "std", feature = "rng"))]
-        static CONTEXT: ContextV1 = ContextV1 {
-            count: Atomic::new(0),
-        };
+        use crate::std::sync::LazyLock;

-        #[cfg(all(feature = "std", feature = "rng"))]
-        static CONTEXT_INITIALIZED: Atomic<bool> = Atomic::new(false);
+        use atomic::{Atomic, Ordering};

         #[cfg(all(feature = "std", feature = "rng"))]
-        pub(crate) fn shared_context() -> &'static ContextV1 {
-            // If the context is in its initial state then assign it to a random value
-            // It doesn't matter if multiple threads observe `false` here and initialize the context
-            if CONTEXT_INITIALIZED
-                .compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed)
-                .is_ok()
-            {
-                CONTEXT.count.store(crate::rng::u16(), Ordering::Release);
-            }
+        static CONTEXT: LazyLock<ContextV1> = LazyLock::new(ContextV1::new_random);

-            &CONTEXT
+        #[cfg(all(feature = "std", feature = "rng"))]
+        pub(crate) fn shared_context_v1() -> &'static ContextV1 {
+            &*CONTEXT
         }

         /// An internally synchronized, wrapping counter that produces 14-bit values for version 1 and version 6 UUIDs.
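
Note: this hunk replaces a hand-rolled once-flag (`CONTEXT_INITIALIZED` plus `compare_exchange`) with `LazyLock`, which runs its initializer exactly once and blocks concurrent readers until it finishes; under the old scheme a thread that lost the race could still observe the zero-initialized counter before the winner's `store` landed. A minimal, self-contained sketch of the pattern using std's `LazyLock` (stable since Rust 1.80); `random_u16` here is a hypothetical stand-in for the crate's internal `crate::rng::u16`:

    use std::sync::atomic::AtomicU16;
    use std::sync::LazyLock;

    struct ContextV1 {
        count: AtomicU16,
    }

    impl ContextV1 {
        fn new_random() -> Self {
            ContextV1 {
                count: AtomicU16::new(random_u16()),
            }
        }
    }

    // Initialized on first access, exactly once, even under contention.
    static CONTEXT: LazyLock<ContextV1> = LazyLock::new(ContextV1::new_random);

    fn shared_context_v1() -> &'static ContextV1 {
        &*CONTEXT
    }

    // Hypothetical stand-in for the crate's RNG, not its real implementation.
    fn random_u16() -> u16 {
        0x1234
    }
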
@@ -862,35 +851,7 @@ pub mod context {
             }
         }

-        #[derive(Debug)]
-        struct Adjust {
-            by_ns: u128,
-        }
-
-        impl Adjust {
-            #[inline]
-            fn by_millis(millis: u32) -> Self {
-                Adjust {
-                    by_ns: (millis as u128).saturating_mul(1_000_000),
-                }
-            }
-
-            #[inline]
-            fn apply(&self, seconds: u64, subsec_nanos: u32) -> (u64, u32) {
-                if self.by_ns == 0 {
-                    // No shift applied
-                    return (seconds, subsec_nanos);
-                }
-
-                let ts = (seconds as u128)
-                    .saturating_mul(1_000_000_000)
-                    .saturating_add(subsec_nanos as u128)
-                    .saturating_add(self.by_ns);
-
-                ((ts / 1_000_000_000) as u64, (ts % 1_000_000_000) as u32)
-            }
-        }
-
+        /// A timestamp that keeps track of whether a reseed is necessary.
         #[derive(Debug, Default, Clone, Copy)]
         struct ReseedingTimestamp {
             last_seed: u64,
@@ -899,6 +860,21 @@ pub mod context {
         }

         impl ReseedingTimestamp {
+            #[inline]
+            fn from_ts(seconds: u64, subsec_nanos: u32) -> Self {
+                // Reseed when the millisecond advances
+                let last_seed = seconds
+                    .saturating_mul(1_000)
+                    .saturating_add((subsec_nanos / 1_000_000) as u64);
+
+                ReseedingTimestamp {
+                    last_seed,
+                    seconds,
+                    subsec_nanos,
+                }
+            }
+
+            /// Advance the timestamp to a new value, returning whether a reseed is necessary.
             #[inline]
             fn advance(&self, seconds: u64, subsec_nanos: u32) -> (Self, bool) {
                 let incoming = ReseedingTimestamp::from_ts(seconds, subsec_nanos);
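
Note: `from_ts` collapses the timestamp to millisecond granularity (`seconds * 1_000 + subsec_nanos / 1_000_000`), so `advance` only signals a reseed when that millisecond value actually changes. A small sketch of the boundary arithmetic, reusing the same formula:

    fn last_seed(seconds: u64, subsec_nanos: u32) -> u64 {
        seconds
            .saturating_mul(1_000)
            .saturating_add((subsec_nanos / 1_000_000) as u64)
    }

    fn main() {
        let a = last_seed(10, 5_400_000); // 10.0054s -> 10_005
        let b = last_seed(10, 5_900_000); // 10.0059s -> 10_005
        let c = last_seed(10, 6_000_000); // 10.0060s -> 10_006
        assert_eq!(a, b); // same millisecond: no reseed
        assert_ne!(b, c); // millisecond advanced: reseed
    }
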
@@ -916,20 +892,7 @@ pub mod context {
                 }
             }

-            #[inline]
-            fn from_ts(seconds: u64, subsec_nanos: u32) -> Self {
-                // Reseed when the millisecond advances
-                let last_seed = seconds
-                    .saturating_mul(1_000)
-                    .saturating_add((subsec_nanos / 1_000_000) as u64);
-
-                ReseedingTimestamp {
-                    last_seed,
-                    seconds,
-                    subsec_nanos,
-                }
-            }
-
+            /// Advance the timestamp by a millisecond.
             #[inline]
             fn increment(&self) -> Self {
                 let (seconds, subsec_nanos) =
@@ -944,6 +907,76 @@ pub mod context {
             }
         }

+        /// A counter that initializes to a safe random seed and tracks overflow.
+        #[derive(Debug, Clone, Copy)]
+        struct Counter {
+            value: u64,
+        }
+
+        impl Counter {
+            #[inline]
+            fn reseed(precision: &Precision, timestamp: &ReseedingTimestamp) -> Self {
+                Counter {
+                    value: precision.apply(crate::rng::u64() & RESEED_MASK, timestamp),
+                }
+            }
+
+            /// Advance the counter.
+            #[inline]
+            fn increment(&self, precision: &Precision, timestamp: &ReseedingTimestamp) -> Self {
+                let mut counter = Counter {
+                    value: precision.apply(self.value, timestamp),
+                };
+
+                // We unconditionally increment the counter even though the precision
+                // may have set higher bits already. This could technically be avoided,
+                // but the higher bits are a coarse approximation so we just avoid the
+                // `if` branch and increment it either way
+
+                // Guaranteed to never overflow u64
+                counter.value += 1;
+
+                counter
+            }
+
+            #[inline]
+            fn has_overflowed(&self) -> bool {
+                self.value > MAX_COUNTER
+            }
+        }
+
+        /// A utility that adjusts an input timestamp by a given number of nanoseconds.
+        #[derive(Debug)]
+        struct Adjust {
+            by_ns: u128,
+        }
+
+        impl Adjust {
+            #[inline]
+            fn by_millis(millis: u32) -> Self {
+                Adjust {
+                    by_ns: (millis as u128).saturating_mul(1_000_000),
+                }
+            }
+
+            /// Apply the adjustment, returning the adjusted timestamp.
+            #[inline]
+            fn apply(&self, seconds: u64, subsec_nanos: u32) -> (u64, u32) {
+                if self.by_ns == 0 {
+                    // No shift applied
+                    return (seconds, subsec_nanos);
+                }
+
+                let ts = (seconds as u128)
+                    .saturating_mul(1_000_000_000)
+                    .saturating_add(subsec_nanos as u128)
+                    .saturating_add(self.by_ns);
+
+                ((ts / 1_000_000_000) as u64, (ts % 1_000_000_000) as u32)
+            }
+        }
+
+        /// A utility that overwrites some number of counter bits with additional timestamp precision.
         #[derive(Debug)]
         struct Precision {
             bits: usize,
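
Note: `Adjust` widens everything to nanoseconds as `u128` so adding the offset can carry from the subsecond field into the seconds field without overflow, and `Counter::increment` always adds 1 even when `Precision::apply` has just rewritten the high bits, trading an occasional extra tick for a branch-free path. A worked example of the `Adjust` arithmetic, with the `apply` body lifted out as a free function for illustration:

    fn apply(by_ns: u128, seconds: u64, subsec_nanos: u32) -> (u64, u32) {
        let ts = (seconds as u128)
            .saturating_mul(1_000_000_000)
            .saturating_add(subsec_nanos as u128)
            .saturating_add(by_ns);

        ((ts / 1_000_000_000) as u64, (ts % 1_000_000_000) as u32)
    }

    fn main() {
        // Adjust::by_millis(1) stores by_ns = 1_000_000.
        // 10.999_999_900s + 1ms carries into the next second.
        assert_eq!(apply(1_000_000, 10, 999_999_900), (11, 999_900));
    }
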
@@ -971,52 +1004,17 @@ pub mod context {
                 }
             }

+            /// Apply additional precision from the given timestamp to the counter.
             #[inline]
-            fn apply(&self, value: u64, timestamp: &ReseedingTimestamp) -> u64 {
+            fn apply(&self, counter: u64, timestamp: &ReseedingTimestamp) -> u64 {
                 if self.bits == 0 {
                     // No additional precision is being used
-                    return value;
+                    return counter;
                 }

                 let additional = timestamp.submilli_nanos() as u64 / self.factor;

-                (value & self.mask) | (additional << self.shift)
-            }
-        }
-
-        #[derive(Debug, Clone, Copy)]
-        struct Counter {
-            value: u64,
-        }
-
-        impl Counter {
-            #[inline]
-            fn reseed(precision: &Precision, timestamp: &ReseedingTimestamp) -> Self {
-                Counter {
-                    value: precision.apply(crate::rng::u64() & RESEED_MASK, timestamp),
-                }
-            }
-
-            #[inline]
-            fn increment(&self, precision: &Precision, timestamp: &ReseedingTimestamp) -> Self {
-                let mut counter = Counter {
-                    value: precision.apply(self.value, timestamp),
-                };
-
-                // We unconditionally increment the counter even though the precision
-                // may have set higher bits already. This could technically be avoided,
-                // but the higher bits are a coarse approximation so we just avoid the
-                // `if` branch and increment it either way
-
-                // Guaranteed to never overflow u64
-                counter.value += 1;
-
-                counter
-            }
-
-            #[inline]
-            fn has_overflowed(&self) -> bool {
-                self.value > MAX_COUNTER
+                (counter & self.mask) | (additional << self.shift)
             }
         }

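
Note: `Precision::apply` overwrites the top `self.bits` bits of the counter with a scaled-down copy of the submillisecond nanoseconds, so counters generated in the same millisecond stay roughly ordered by time. The real `mask`, `shift`, and `factor` are derived in `Precision::new`, which this hunk doesn't show; the following is a hypothetical sketch of the same idea that assumes a 42-bit usable counter and 12 bits of extra precision:

    fn apply(counter: u64, submilli_nanos: u64) -> u64 {
        const BITS: u32 = 12; // counter bits replaced by timestamp precision (assumed)
        const SHIFT: u32 = 42 - BITS; // assumes 42 usable counter bits
        const MASK: u64 = (1u64 << SHIFT) - 1; // keeps only the low, untouched bits

        // Scale 0..1_000_000 submillisecond nanos down to fit in BITS bits
        let factor = 1_000_000u64.div_ceil(1 << BITS); // 245
        let additional = submilli_nanos / factor; // always < 2^BITS

        (counter & MASK) | (additional << SHIFT)
    }

    fn main() {
        // Half a millisecond lands roughly halfway through the 12-bit range
        assert_eq!(apply(0, 500_000) >> 30, 500_000 / 245);
    }
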