@@ -419,129 +419,120 @@ def lru_cache(maxsize=128, typed=False):
419419 if maxsize is not None and not isinstance (maxsize , int ):
420420 raise TypeError ('Expected maxsize to be an integer or None' )
421421
422- def decorating_function (user_function ):
423- wrapper = _lru_cache_wrapper (user_function , maxsize , typed , _CacheInfo )
424- return update_wrapper (wrapper , user_function )
425-
426- return decorating_function
427-
428- def _lru_cache_wrapper (user_function , maxsize , typed , _CacheInfo ):
429422 # Constants shared by all lru cache instances:
430423 sentinel = object () # unique object used to signal cache misses
431424 make_key = _make_key # build a key from the function arguments
432425 PREV , NEXT , KEY , RESULT = 0 , 1 , 2 , 3 # names for the link fields
433426
434- cache = {}
435- hits = misses = 0
436- full = False
437- cache_get = cache .get # bound method to lookup a key or return None
438- lock = RLock () # because linkedlist updates aren't threadsafe
439- root = [] # root of the circular doubly linked list
440- root [:] = [root , root , None , None ] # initialize by pointing to self
441-
442- if maxsize == 0 :
443-
444- def wrapper (* args , ** kwds ):
445- # No caching -- just a statistics update after a successful call
446- nonlocal misses
447- result = user_function (* args , ** kwds )
448- misses += 1
449- return result
450-
451- elif maxsize is None :
452-
453- def wrapper (* args , ** kwds ):
454- # Simple caching without ordering or size limit
455- nonlocal hits , misses
456- key = make_key (args , kwds , typed )
457- result = cache_get (key , sentinel )
458- if result is not sentinel :
459- hits += 1
427+ def decorating_function (user_function ):
428+ cache = {}
429+ hits = misses = 0
430+ full = False
431+ cache_get = cache .get # bound method to lookup a key or return None
432+ lock = RLock () # because linkedlist updates aren't threadsafe
433+ root = [] # root of the circular doubly linked list
434+ root [:] = [root , root , None , None ] # initialize by pointing to self
435+
436+ if maxsize == 0 :
437+
438+ def wrapper (* args , ** kwds ):
439+ # No caching -- just a statistics update after a successful call
440+ nonlocal misses
441+ result = user_function (* args , ** kwds )
442+ misses += 1
460443 return result
461- result = user_function (* args , ** kwds )
462- cache [key ] = result
463- misses += 1
464- return result
465444
466- else :
445+ elif maxsize is None :
467446
468- def wrapper (* args , ** kwds ):
469- # Size limited caching that tracks accesses by recency
470- nonlocal root , hits , misses , full
471- key = make_key (args , kwds , typed )
472- with lock :
473- link = cache_get (key )
474- if link is not None :
475- # Move the link to the front of the circular queue
476- link_prev , link_next , _key , result = link
477- link_prev [NEXT ] = link_next
478- link_next [PREV ] = link_prev
479- last = root [PREV ]
480- last [NEXT ] = root [PREV ] = link
481- link [PREV ] = last
482- link [NEXT ] = root
447+ def wrapper (* args , ** kwds ):
448+ # Simple caching without ordering or size limit
449+ nonlocal hits , misses
450+ key = make_key (args , kwds , typed )
451+ result = cache_get (key , sentinel )
452+ if result is not sentinel :
483453 hits += 1
484454 return result
485- result = user_function (* args , ** kwds )
486- with lock :
487- if key in cache :
488- # Getting here means that this same key was added to the
489- # cache while the lock was released. Since the link
490- # update is already done, we need only return the
491- # computed result and update the count of misses.
492- pass
493- elif full :
494- # Use the old root to store the new key and result.
495- oldroot = root
496- oldroot [KEY ] = key
497- oldroot [RESULT ] = result
498- # Empty the oldest link and make it the new root.
499- # Keep a reference to the old key and old result to
500- # prevent their ref counts from going to zero during the
501- # update. That will prevent potentially arbitrary object
502- # clean-up code (i.e. __del__) from running while we're
503- # still adjusting the links.
504- root = oldroot [NEXT ]
505- oldkey = root [KEY ]
506- oldresult = root [RESULT ]
507- root [KEY ] = root [RESULT ] = None
508- # Now update the cache dictionary.
509- del cache [oldkey ]
510- # Save the potentially reentrant cache[key] assignment
511- # for last, after the root and links have been put in
512- # a consistent state.
513- cache [key ] = oldroot
514- else :
515- # Put result in a new link at the front of the queue.
516- last = root [PREV ]
517- link = [last , root , key , result ]
518- last [NEXT ] = root [PREV ] = cache [key ] = link
519- full = (len (cache ) >= maxsize )
455+ result = user_function (* args , ** kwds )
456+ cache [key ] = result
520457 misses += 1
521- return result
458+ return result
522459
523- def cache_info ():
524- """Report cache statistics"""
525- with lock :
526- return _CacheInfo (hits , misses , maxsize , len (cache ))
460+ else :
527461
528- def cache_clear ():
529- """Clear the cache and cache statistics"""
530- nonlocal hits , misses , full
531- with lock :
532- cache .clear ()
533- root [:] = [root , root , None , None ]
534- hits = misses = 0
535- full = False
462+ def wrapper (* args , ** kwds ):
463+ # Size limited caching that tracks accesses by recency
464+ nonlocal root , hits , misses , full
465+ key = make_key (args , kwds , typed )
466+ with lock :
467+ link = cache_get (key )
468+ if link is not None :
469+ # Move the link to the front of the circular queue
470+ link_prev , link_next , _key , result = link
471+ link_prev [NEXT ] = link_next
472+ link_next [PREV ] = link_prev
473+ last = root [PREV ]
474+ last [NEXT ] = root [PREV ] = link
475+ link [PREV ] = last
476+ link [NEXT ] = root
477+ hits += 1
478+ return result
479+ result = user_function (* args , ** kwds )
480+ with lock :
481+ if key in cache :
482+ # Getting here means that this same key was added to the
483+ # cache while the lock was released. Since the link
484+ # update is already done, we need only return the
485+ # computed result and update the count of misses.
486+ pass
487+ elif full :
488+ # Use the old root to store the new key and result.
489+ oldroot = root
490+ oldroot [KEY ] = key
491+ oldroot [RESULT ] = result
492+ # Empty the oldest link and make it the new root.
493+ # Keep a reference to the old key and old result to
494+ # prevent their ref counts from going to zero during the
495+ # update. That will prevent potentially arbitrary object
496+ # clean-up code (i.e. __del__) from running while we're
497+ # still adjusting the links.
498+ root = oldroot [NEXT ]
499+ oldkey = root [KEY ]
500+ oldresult = root [RESULT ]
501+ root [KEY ] = root [RESULT ] = None
502+ # Now update the cache dictionary.
503+ del cache [oldkey ]
504+ # Save the potentially reentrant cache[key] assignment
505+ # for last, after the root and links have been put in
506+ # a consistent state.
507+ cache [key ] = oldroot
508+ else :
509+ # Put result in a new link at the front of the queue.
510+ last = root [PREV ]
511+ link = [last , root , key , result ]
512+ last [NEXT ] = root [PREV ] = cache [key ] = link
513+ full = (len (cache ) >= maxsize )
514+ misses += 1
515+ return result
536516
537- wrapper .cache_info = cache_info
538- wrapper .cache_clear = cache_clear
539- return update_wrapper (wrapper , user_function )
517+ def cache_info ():
518+ """Report cache statistics"""
519+ with lock :
520+ return _CacheInfo (hits , misses , maxsize , len (cache ))
540521
541- try :
542- from _functools import _lru_cache_wrapper
543- except ImportError :
544- pass
522+ def cache_clear ():
523+ """Clear the cache and cache statistics"""
524+ nonlocal hits , misses , full
525+ with lock :
526+ cache .clear ()
527+ root [:] = [root , root , None , None ]
528+ hits = misses = 0
529+ full = False
530+
531+ wrapper .cache_info = cache_info
532+ wrapper .cache_clear = cache_clear
533+ return update_wrapper (wrapper , user_function )
534+
535+ return decorating_function
545536
546537
547538################################################################################