#!python
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License
# Version 1.1 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
#
# The Original Code is Komodo code.
#
# The Initial Developer of the Original Code is ActiveState Software Inc.
# Portions created by ActiveState Software Inc are Copyright (C) 2000-2007
# ActiveState Software Inc. All Rights Reserved.
#
# Contributor(s):
# ActiveState Software Inc
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
"""The langzone of the codeintel database.
See the database/database.py module docstring for an overview.
"""
import sys
import os
from os.path import (join, dirname, exists, expanduser, splitext, basename,
split, abspath, isabs, isdir, isfile, normpath)
import threading
import time
from glob import glob
from pprint import pprint, pformat
import logging
from io import BytesIO
import codecs
import copy
import ciElementTree as ET
from codeintel2.common import *
from codeintel2 import util
from codeintel2.database.util import rmdir
from codeintel2.database.langlibbase import LangDirsLibBase
#---- globals
log = logging.getLogger("codeintel.db")
# log.setLevel(logging.DEBUG)
#---- Database zone and lib implementations
class LangDirsLib(LangDirsLibBase):
"""A zone providing a view into an ordered list of dirs in a
db/$lang/... area of the db.
These are dished out via Database.get_lang_lib(), which indirectly
then is dished out by the LangZone.get_lib(). Mostly this is just a
view on the LangZone singleton for this particular language.
Dev Notes:
- The goal is to provide quick has_blob() and get_blob() -- i.e.
some caching is involved (if 'foo' referred to
'some/path/to/foo.py' a minute ago then it still does). As well,
scanning/loading is done automatically as necessary. For example,
if a request for Perl blob 'Bar' is made but there is no 'Bar' in
the database yet, this code looks for a 'Bar.pm' on the file
system and will scan it, load it and return the blob for it.
"""
def __init__(self, lang_zone, lock, lang, name, dirs):
LangDirsLibBase.__init__(self)
self.lang_zone = lang_zone
self._lock = lock
self.mgr = lang_zone.mgr
self.lang = lang
self.name = name
self.dirs = dirs
self.import_handler \
= self.mgr.citadel.import_handler_from_lang(self.lang)
self._blob_imports_from_prefix_cache = {}
self._importables_from_dir_cache = {}
# We keep a "weak" merged cache of blobname lookup for all dirs
# in this zone -- where "weak" means that we verify a hit by
# checking the current real blob_index for that dir (which may
# have changed). This caching slows down lookup for single-dir
# LangDirsZones, but should scale better for LangDirsZones with
# many dirs. (TODO-PERF: test this assertion.)
self._dir_and_blobbase_from_blobname = {}
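        # Illustrative cache entry (paths assumed): a lookup of Python
        # blob "xml.sax" under dir "/usr/lib/python" would be cached as
        #   {"xml.sax": ("/usr/lib/python/xml", "sax")}
        # and re-verified against that dir's blob_index on each hit.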
def __repr__(self):
return "<%s %s>" % (self.lang, self.name)
def _acquire_lock(self):
self._lock.acquire()
def _release_lock(self):
self._lock.release()
def has_blob(self, blobname, ctlr=None):
dbsubpath = self._dbsubpath_from_blobname(blobname, ctlr=ctlr)
return dbsubpath is not None
def has_blob_in_db(self, blobname, ctlr=None):
"""Return true if the blobname is in the database.
Typically this method is only used for debugging and .has_blob()
is what you want.
"""
dbsubpath = self._dbsubpath_from_blobname(
blobname, ctlr=ctlr, only_look_in_db=True)
return dbsubpath is not None
def get_blob(self, blobname, ctlr=None):
self._acquire_lock()
try:
dbsubpath = self._dbsubpath_from_blobname(blobname, ctlr=ctlr)
if dbsubpath is not None:
return self.lang_zone.load_blob(dbsubpath)
else:
return None
finally:
self._release_lock()
def get_blob_imports(self, prefix):
"""Return the set of imports under the given prefix.
"prefix" is a tuple of import name parts. E.g. ("xml", "sax")
for "import xml.sax." in Python. Or ("XML", "Parser") for
"use XML::Parser::" in Perl.
See description in database.py docstring for details.
"""
self._acquire_lock()
try:
if prefix not in self._blob_imports_from_prefix_cache:
if prefix:
for dir in self.dirs:
importables = self._importables_from_dir(dir)
if prefix[0] in importables:
sub_importables = self._importables_from_dir(
join(dir, *prefix))
imports = set(
(name, is_dir_import)
for name, (_, _, is_dir_import)
in list(sub_importables.items())
)
break
else:
imports = set()
else:
imports = set()
for dir in self.dirs:
importables = self._importables_from_dir(dir)
imports.update(
(name, is_dir_import)
for name, (_, _, is_dir_import)
in list(importables.items())
)
self._blob_imports_from_prefix_cache[prefix] = imports
return self._blob_imports_from_prefix_cache[prefix]
finally:
self._release_lock()
def blobs_with_basename(self, basename, ctlr=None):
"""Return all blobs that match the given base path.
I.e. a filename lookup across all files in the dirs of this lib.
"basename" is a string, e.g. 'Http.js'
"ctlr" (optional) is an EvalController instance. If
specified it should be used in the normal way (logging,
checking .is_aborted()).
A "blob" is a global scope-tag hit in all of the blobs for the execution
set buffers.
Returns the empty list if no hits.
"""
self.ensure_all_dirs_scanned(ctlr=ctlr)
blobs = []
        # We can't use self.get_blob because that only returns one
        # answer; we need all of them.
self._acquire_lock()
try:
for dir in self.dirs:
dbfile_from_blobname = self.lang_zone.dfb_from_dir(dir, {})
blobbase = dbfile_from_blobname.get(basename)
if blobbase is not None:
dhash = self.lang_zone.dhash_from_dir(dir)
dbsubpath = join(dhash, blobbase)
blobs.append(self.lang_zone.load_blob(dbsubpath))
finally:
self._release_lock()
return blobs
def hits_from_lpath(self, lpath, ctlr=None, curr_buf=None):
"""Return all hits of the given lookup path.
I.e. a symbol table lookup across all files in the dirs of this
lib.
"lpath" is a lookup name list, e.g. ['Casper', 'Logging']
or ['dojo', 'animation'].
"ctlr" (optional) is an EvalController instance. If
specified it should be used in the normal way (logging,
checking .is_aborted()).
"curr_buf" (optional), if specified, is the current buf for
which this query is being made. Hits from it should be
skipped (i.e. don't bother searching it).
A "hit" is (<CIX node>, <scope-ref>). Each one represent a
scope-tag or variable-tag hit in all of the blobs for the
execution set buffers.
Returns the empty list if no hits.
"""
assert isinstance(lpath, tuple) # common mistake to pass in a string
# Need to have (at least once) scanned all importables.
# Responsibility for ensuring the scan data is *up-to-date*
# is elsewhere.
self.ensure_all_dirs_scanned(ctlr=ctlr)
if curr_buf:
curr_blobname = curr_buf.blob_from_lang.get(
self.lang, {}).get("name")
curr_buf_dir = dirname(curr_buf.path)
# Naive implementation (no caching)
hits = []
for dir in self.dirs:
if ctlr and ctlr.is_aborted():
log.debug("ctlr aborted")
break
toplevelname_index = self.lang_zone.load_index(
dir, "toplevelname_index", {})
for blobname in toplevelname_index.get_blobnames(lpath[0], ()):
if curr_buf and curr_buf_dir == dir and blobname == curr_blobname:
continue
blob = self.get_blob(blobname, ctlr=ctlr)
try:
elem = blob
for p in lpath:
# LIMITATION: *Imported* names at each scope are
# not being included here. This is fine while we
# just care about JavaScript.
if curr_buf:
if "__file_local__" in elem.get("attributes", "").split():
# this is a file-local element in a different blob,
# don't look at it
raise KeyError
elem = elem.names[p]
except KeyError:
continue
hits.append((elem, (blob, list(lpath[:-1]))))
return hits
def toplevel_cplns(self, prefix=None, ilk=None, ctlr=None):
"""Return completion info for all top-level names matching the
given prefix and ilk in all blobs in this lib.
"prefix" is a 3-character prefix with which to filter top-level
names. If None (or not specified), results are not filtered
based on the prefix.
"ilk" is a symbol type (e.g. "class", "variable", "function")
with which to filter results. If None (or not specified),
results of any ilk are returned.
"ctlr" (optional) is an EvalController instance. If
specified it should be used in the normal way (logging,
checking .is_aborted()).
Returns a list of 2-tuples: (<ilk>, <name>).
Note: the list is not sorted, because often some special sorting
is required for the different completion evaluators that might use
this API.
"""
self.ensure_all_dirs_scanned(ctlr=ctlr)
cplns = []
# Naive implementation (no caching)
for dir in self.dirs:
if ctlr and ctlr.is_aborted():
log.debug("ctlr aborted")
break
try:
toplevelname_index = self.lang_zone.load_index(
dir, "toplevelname_index")
except EnvironmentError:
# No toplevelname_index for this dir likely indicates that
# there weren't any files of the current lang in this dir.
continue
cplns += toplevelname_index.toplevel_cplns(prefix=prefix, ilk=ilk)
return cplns
def _importables_from_dir(self, dir):
if dir not in self._importables_from_dir_cache:
self._importables_from_dir_cache[dir] \
= self.import_handler.find_importables_in_dir(dir)
return self._importables_from_dir_cache[dir]
def _dbsubpath_from_blobname(self, blobname, ctlr=None,
only_look_in_db=False):
"""Return the subpath to the dbfile for the given blobname,
or None if not found.
Remember that this is complicated by possible multi-level
imports. E.g. "import foo.bar" or "import foo" where 'foo'
refers to 'foo/__init__.py'.
"""
assert blobname is not None, "'blobname' cannot be None"
lang_zone = self.lang_zone
self._acquire_lock()
try:
# Use our weak cache to try to return quickly.
if blobname in self._dir_and_blobbase_from_blobname:
blobdir, blobbase \
= self._dir_and_blobbase_from_blobname[blobname]
# Check it. The actual info for that dir may have changed.
dbfile_from_blobname = lang_zone.dfb_from_dir(blobdir)
if blobbase in dbfile_from_blobname:
log.debug("have blob '%s' in '%s'? yes (in weak cache)",
blobname, blobdir)
return join(lang_zone.dhash_from_dir(blobdir),
dbfile_from_blobname[blobbase])
# Drop from weak cache.
del self._dir_and_blobbase_from_blobname[blobname]
# Brute force: look in each dir.
blobparts = blobname.split(self.import_handler.sep)
blobbase = blobparts[-1]
for dir in self.dirs:
if ctlr and ctlr.is_aborted():
log.debug("aborting search for blob '%s' on %s: "
"ctlr aborted", blobname, self)
return None
# Is the blob in 'blobdir' (i.e. a non-multi-level import
# that has been scanned already).
blobdir = join(dir, *blobparts[:-1])
dbfile_from_blobname = lang_zone.dfb_from_dir(blobdir, {})
if self.lang == "Perl":
# Perl uses the full blob name - not just the blob base,
# see bug 89106 for details.
if blobname in dbfile_from_blobname:
self._dir_and_blobbase_from_blobname[blobname] \
= (blobdir, blobname)
log.debug("have blob '%s' in '%s'? yes (in dir index)",
blobname, blobdir)
return join(lang_zone.dhash_from_dir(blobdir),
dbfile_from_blobname[blobname])
if blobbase in dbfile_from_blobname:
self._dir_and_blobbase_from_blobname[blobname] \
= (blobdir, blobbase)
log.debug("have blob '%s' in '%s'? yes (in dir index)",
blobname, blobdir)
return join(lang_zone.dhash_from_dir(blobdir),
dbfile_from_blobname[blobbase])
importables = self._importables_from_dir(blobdir)
# 'importables' look like, for Python:
# {'foo': ('foo.py', None, False),
# 'pkg': ('pkg/__init__.py', '__init__', False)}
# for Perl:
# {'LWP': ('LWP.pm', None, True),
# 'File': (None, None, True)}
# | | | `-- is-dir-import
# | | `-- subdir-blobbase
# | `-- blobfile
# `-- blobbase
if blobbase not in importables:
continue
blobfile, subdir_blobbase, is_dir_import = importables[
blobbase]
if blobfile is None:
# There isn't an actual importable file here -- just
# a dir prefix to a multidir import.
log.debug("have blob '%s' in %s? no", blobname, self)
continue
elif os.sep in blobfile:
# This is an import from a subdir. We need to get a new
# dbf.
blobdir = join(blobdir, dirname(blobfile))
blobfile = basename(blobfile)
blobbase = subdir_blobbase
dbfile_from_blobname = lang_zone.dfb_from_dir(blobdir, {})
if blobbase in dbfile_from_blobname:
self._dir_and_blobbase_from_blobname[blobname] \
= (blobdir, blobbase)
log.debug("have blob '%s' in '%s'? yes (in dir index)",
blobname, blobdir)
return join(lang_zone.dhash_from_dir(blobdir),
dbfile_from_blobname[blobbase])
# The file isn't loaded.
if not only_look_in_db:
log.debug("%s importables in '%s':\n %s", self.lang,
blobdir, importables)
log.debug("'%s' likely provided by '%s' in '%s': "
"attempting load", blobname, blobfile, blobdir)
try:
buf = self.mgr.buf_from_path(
join(blobdir, blobfile), self.lang)
except (EnvironmentError, CodeIntelError) as ex:
# This can occur if the path does not exist, such as a
# broken symlink, or we don't have permission to read
# the file, or the file does not contain text.
continue
buf.scan_if_necessary()
dbfile_from_blobname = lang_zone.dfb_from_dir(blobdir, {})
if self.lang == "Perl":
# Perl uses the full blob name - not just the blob base,
# see bug 89106 for details.
if blobname in dbfile_from_blobname:
self._dir_and_blobbase_from_blobname[blobname] \
= (blobdir, blobname)
log.debug(
"have blob '%s' in '%s'? yes (in dir index)",
blobname, blobdir)
return join(lang_zone.dhash_from_dir(blobdir),
dbfile_from_blobname[blobname])
if blobbase in dbfile_from_blobname:
self._dir_and_blobbase_from_blobname[blobname] \
= (blobdir, blobbase)
log.debug("have blob '%s' in '%s'? yes (after load)",
blobname, blobdir)
return join(lang_zone.dhash_from_dir(blobdir),
dbfile_from_blobname[blobbase])
log.debug("have blob '%s' in %s? no", blobname, self)
return None
finally:
self._release_lock()


class LangTopLevelNameIndex(object):
"""A wrapper around the plain-dictionary toplevelname_index for a
LangZone dir to provide better performance for continual updating
and some simpler access.
{ilk -> toplevelname -> blobnames}
# Problem
A 'toplevelname_index' is a merge of {blobname -> ilk -> toplevelnames}
data for all resources in its dir. As those resources are
continually re-scanned (e.g. as a file is edited in Komodo), it
    would be too expensive to update this index every time.
# Solution
Keep a list of "recent updates" and only merge them into the main
data when that buf hasn't been updated in "a while" and when needed
for saving the index. Note: Buffer *removals* are not put on-deck,
but removed immediately.
# .get_blobnames(..., ilk=None)
Originally the toplevelname_index stored {toplevelname -> blobnames}.
The per-"ilk" level was added afterwards to support occassional ilk
filtering for PHP (and possible eventually other langs).
.get_blobnames() still behaves like a {toplevelname -> blobnames}
mapping, but it provides an optional "ilk" keyword arg to limit the
results to that ilk.
# Notes on locking
This class does not guard its datastructures with locking. It is up
to the LangZone using this to guard against simultaneous access on
separate threads.
"""
def __init__(self, data=None, timeout=90):
# toplevelname_index data: {ilk -> toplevelname -> blobnames}
if data is None:
self._data = {}
else:
self._data = data
# Time (in seconds) to hold a change "on deck".
# Timed-out changes are merged on .get() and .update().
self.timeout = timeout
self._on_deck = {
# basename # the basename of the buf path
# -> [timestamp, # time of the last update
# # The dict in res_index, a.k.a. 'res_data'
# {blobname -> ilk -> toplevelnames},
# # Lazily generated pivot, a.k.a. 'res_data_pivot'
# {ilk -> toplevelname -> blobnames}
# ]
}
def __repr__(self):
num_toplevelnames = sum(len(v) for v in self._data.values())
return ("<LangTopLevelNameIndex: %d top-level name(s), "
"%d update(s) on-deck>"
% (num_toplevelnames, len(self._on_deck)))
def merge(self):
"""Merge all on-deck changes with `self.data'."""
for base, (timestamp, res_data,
res_data_pivot) in list(self._on_deck.items()):
if res_data_pivot is None:
res_data_pivot = self._pivot_res_data(res_data)
# res_data_pivot: {ilk -> toplevelname -> blobnames}
# "bft" means blobnames_from_toplevelname
for ilk, bft in res_data_pivot.items():
data_bft = self._data.setdefault(ilk, {})
for toplevelname, blobnames in bft.items():
if toplevelname not in data_bft:
data_bft[toplevelname] = blobnames
else:
data_bft[toplevelname].update(blobnames)
del self._on_deck[base]
def merge_expired(self, now):
"""Merge expired on-deck changes with `self.data'."""
for base, (timestamp, res_data,
res_data_pivot) in list(self._on_deck.items()):
if now - timestamp < self.timeout:
continue
if res_data_pivot is None:
res_data_pivot = self._pivot_res_data(res_data)
# res_data_pivot: {ilk -> toplevelname -> blobnames}
# "bft" means blobnames_from_toplevelname
for ilk, bft in res_data_pivot.items():
data_bft = self._data.setdefault(ilk, {})
for toplevelname, blobnames in bft.items():
if toplevelname not in data_bft:
data_bft[toplevelname] = blobnames
else:
data_bft[toplevelname].update(blobnames)
del self._on_deck[base]
@property
def data(self):
self.merge()
return self._data
def update(self, base, old_res_data, new_res_data):
now = time.time()
self.remove(base, old_res_data)
self._on_deck[base] = [now, new_res_data, None]
self.merge_expired(now)
def remove(self, base, old_res_data):
if base in self._on_deck:
del self._on_deck[base]
else:
# Remove old refs from current data.
# old_res_data: {blobname -> ilk -> toplevelnames}
# self._data: {ilk -> toplevelname -> blobnames}
for blobname, toplevelnames_from_ilk in old_res_data.items():
for ilk, toplevelnames in toplevelnames_from_ilk.items():
for toplevelname in toplevelnames:
try:
self._data[ilk][toplevelname].remove(blobname)
except KeyError:
pass # ignore this for now, might indicate corruption
else:
if not self._data[ilk][toplevelname]:
del self._data[ilk][toplevelname]
if not self._data.get(ilk):
del self._data[ilk]
def _pivot_res_data(self, res_data):
# res_data: {blobname -> ilk -> toplevelnames}
# res_data_pivot: {ilk -> toplevelname -> blobnames}
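        # e.g. (illustrative): {"mymod": {"class": ["Foo", "Bar"]}}
        # pivots to {"class": {"Foo": set(["mymod"]),
        #                      "Bar": set(["mymod"])}}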
res_data_pivot = {}
for blobname, toplevelnames_from_ilk in res_data.items():
for ilk, toplevelnames in toplevelnames_from_ilk.items():
pivot_bft = res_data_pivot.setdefault(ilk, {})
for toplevelname in toplevelnames:
if toplevelname not in pivot_bft:
pivot_bft[toplevelname] = set([blobname])
else:
pivot_bft[toplevelname].add(blobname)
return res_data_pivot
def toplevel_cplns(self, prefix=None, ilk=None):
"""Return completion info for all top-level names matching the
given prefix and ilk.
"prefix" is a 3-character prefix with which to filter top-level
names. If None (or not specified), results are not filtered
based on the prefix.
"ilk" is a symbol type (e.g. "class", "variable", "function")
with which to filter results. If None (or not specified),
results of any ilk are returned.
Returns a list of 2-tuples: (<ilk>, <name>).
"""
self.merge_expired(time.time())
# Need to check merged and on-deck items:
cplns = []
# ...on-deck items
for base, (timestamp, res_data,
res_data_pivot) in list(self._on_deck.items()):
if res_data_pivot is None:
res_data_pivot = self._on_deck[base][2] \
= self._pivot_res_data(res_data)
# res_data_pivot: {ilk -> toplevelname -> blobnames}
if ilk is None:
for i, bft in res_data_pivot.items():
cplns += [(i, toplevelname) for toplevelname in bft]
elif ilk in res_data_pivot:
cplns += [(ilk, toplevelname)
for toplevelname in res_data_pivot[ilk]]
# ...merged data
# self._data: {ilk -> toplevelname -> blobnames}
if ilk is None:
for i, bft in self._data.items():
cplns += [(i, toplevelname) for toplevelname in bft]
elif ilk in self._data:
cplns += [(ilk, toplevelname)
for toplevelname in self._data[ilk]]
# Naive implementation: Instead of maintaining a separate
# 'toplevelprefix_index' (as we do for StdLibsZone and CatalogsZone)
# for now we'll just gather all results and filter on the prefix
# here. Only if this proves to be a perf issue will we add the
# complexity of another index:
# {ilk -> prefix -> toplevelnames}
if prefix is not None:
cplns = [(i, t) for i, t in cplns if t.startswith(prefix)]
return cplns
def get_blobnames(self, toplevelname, default=None, ilk=None):
"""Return the blobnames defining the given toplevelname.
If "ilk" is given then only symbols of that ilk will be considered.
        If no match is found, "default" is returned.
"""
self.merge_expired(time.time())
blobnames = set()
# First check on-deck items.
for base, (timestamp, res_data,
res_data_pivot) in list(self._on_deck.items()):
if res_data_pivot is None:
res_data_pivot = self._on_deck[base][2] \
= self._pivot_res_data(res_data)
# res_data_pivot: {ilk -> toplevelname -> blobnames}
if ilk is None:
for bft in res_data_pivot.values():
if toplevelname in bft:
blobnames.update(bft[toplevelname])
elif ilk in res_data_pivot:
if toplevelname in res_data_pivot[ilk]:
blobnames.update(res_data_pivot[ilk][toplevelname])
# TODO: Put lookup in merged data ahead of lookup in on-deck -- so
# we don't do on-deck work if not necessary.
# Then, fallback to already merged data.
# self._data: {ilk -> toplevelname -> blobnames}
if ilk is None:
for bft in self._data.values():
if toplevelname in bft:
blobnames.update(bft[toplevelname])
elif ilk in self._data:
if toplevelname in self._data[ilk]:
blobnames.update(self._data[ilk][toplevelname])
if blobnames:
return blobnames
return default


class LangZone(object):
"""Singleton zone managing a particular db/$lang/... area.
# caching and memory control
We cache all retrieved indices and blobs and maintain their latest
access time. To try to manage memory consumption, we rely on a
bookkeeper thread (the indexer) to periodically call .cull_mem() --
which unloads cache items that have not been accessed in a while.
(TODO:
- Get the indexer to actually call .cull_mem() and .save()
periodically.
- Test that .cull_mem() actually results in the process releasing
memory.)
# robustness (TODO)
Work should be done to improve robustness.
- Collect filesystem interactions in one place.
- Rationalize OSError handling.
- Consider a journal system, if necessary/feasible. My hope is to
get away without one and rely on graceful recovery. The db does
not store critical info so can allow some loss of data (it can all
be regenerated).
"""
toplevelname_index_class = LangTopLevelNameIndex
def __init__(self, mgr, lang):
self.mgr = mgr
self.db = mgr.db
self.lang = lang
self.base_dir = join(self.db.base_dir, "db",
util.safe_lang_from_lang(lang))
self._check_lang(lang)
self._hook_handlers = self.mgr.hook_handlers_from_lang(lang)
self._lock = threading.RLock()
self._dhash_from_dir_cache = {}
self._dirslib_cache = {}
        # We cache the set of recent indices and blobs in memory.
# {db-subpath: [index-object, <atime>]),
# ...}
# For example:
# {'7bce640bc48751b128af5c8bf5df8412/res_index':
# [<res-index>, 1158289000]),
# ...}
self._index_and_atime_from_dbsubpath = {}
# TODO-PERF: Use set() object for this? Compare perf.
        self._is_index_dirty_from_dbsubpath = {}  # set of dirty indices
# TODO: blob caching and *use* this
# self._blob_and_atime_from_dbsubpath = {}
# XXX Need a 'dirty-set' for blobs? No, because currently
# .update_buf_data() saves blob changes to disk immediately. Not
# sure that is best for perf. Definitely not ideal for the
# "editset".
def __repr__(self):
return "<%s lang db>" % self.lang
def _acquire_lock(self):
self._lock.acquire()
def _release_lock(self):
self._lock.release()
def _check_lang(self, lang):
"""Ensure that the given lang matches case exactly with the lang
in the db. If this invariant is broken, then weird things with
caching can result.
"""
if exists(self.base_dir):
lang_path = join(self.base_dir, "lang")
try:
fin = open(lang_path, 'r')
except EnvironmentError as ex:
self.db.corruption("LangZone._check_lang",
"could not open `%s': %s" % (lang_path, ex),
"recover")
                fout = open(lang_path, 'w')
                try:
                    fout.write(lang)
                finally:
                    fout.close()
else:
try:
lang_on_disk = fin.read().strip()
finally:
fin.close()
assert lang_on_disk == lang
# TODO: If Database.dhash_from_dir() grows caching, then this
    # method needn't cache on its own.
def dhash_from_dir(self, dir):
if dir not in self._dhash_from_dir_cache:
self._dhash_from_dir_cache[dir] = self.db.dhash_from_dir(dir)
return self._dhash_from_dir_cache[dir]
def dfb_from_dir(self, dir, default=None):
"""Get the {blobname -> dbfile} mapping index for the given dir.
'dfb' stands for 'dbfile_from_blobname'.
This must be called with the lock held.
"""
return self.load_index(dir, "blob_index", default)
def get_buf_scan_time(self, buf):
# TODO Canonicalize path (or assert that it is canonicalized)
self._acquire_lock()
try:
dir, base = split(buf.path)
res_index = self.load_index(dir, "res_index", {})
if base not in res_index:
return None
return res_index[base][0]
finally:
self._release_lock()
def get_buf_data(self, buf):
# TODO Canonicalize path (or assert that it is canonicalized)
# Should have a Resource object that we pass around that
# handles all of this.
self._acquire_lock()
try:
dir, base = split(buf.path)
res_index = self.load_index(dir, "res_index", {})
if base not in res_index:
raise NotFoundInDatabase("%s buffer '%s' not found in database"
% (buf.lang, buf.path))
scan_time, scan_error, res_data = res_index[base]
blob_from_lang = {}
if res_data:
try:
dbfile_from_blobname = self.dfb_from_dir(dir)
except EnvironmentError as ex:
# DB corruption will be noted in remove_buf_data()
self.remove_buf_data(buf)
raise NotFoundInDatabase("%s buffer '%s' not found in database"
% (buf.lang, buf.path))
dhash = self.dhash_from_dir(dir)
for blobname in res_data:
dbsubpath = join(dhash, dbfile_from_blobname[blobname])
try:
blob = self.load_blob(dbsubpath)
except ET.XMLParserError as ex:
self.db.corruption("LangZone.get_buf_data",
"could not parse dbfile for '%s' blob: %s"
% (blobname, ex),
"recover")
self.remove_buf_data(buf)
raise NotFoundInDatabase(
"`%s' buffer `%s' blob was corrupted in database"
% (buf.path, blobname))
except EnvironmentError as ex:
self.db.corruption("LangZone.get_buf_data",
"could not read dbfile for '%s' blob: %s"
% (blobname, ex),
"recover")
self.remove_buf_data(buf)
raise NotFoundInDatabase(
"`%s' buffer `%s' blob not found in database"
% (buf.path, blobname))
lang = blob.get("lang")
assert lang is not None
blob_from_lang[lang] = blob
return scan_time, scan_error, blob_from_lang
finally:
self._release_lock()
def remove_path(self, path):
"""Remove the given resource from the database."""
# TODO Canonicalize path (or assert that it is canonicalized)
# Should have a Resource object that we pass around that
# handles all of this.
self._acquire_lock()
try:
dir, base = split(path)
res_index = self.load_index(dir, "res_index", {})
try:
scan_time, scan_error, res_data = res_index[base]
except KeyError:
# This resource isn't loaded in the db. Nothing to remove.
return
try:
blob_index = self.load_index(dir, "blob_index")
except EnvironmentError as ex:
self.db.corruption("LangZone.remove_path",
"could not read blob_index for '%s' dir: %s" % (
dir, ex),
"recover")
blob_index = {}
is_hits_from_lpath_lang = self.lang in self.db.import_everything_langs
if is_hits_from_lpath_lang:
try:
toplevelname_index = self.load_index(
dir, "toplevelname_index")
except EnvironmentError as ex:
self.db.corruption("LangZone.remove_path",
"could not read toplevelname_index for '%s' dir: %s"
% (dir, ex),
"recover")
toplevelname_index = self.toplevelname_index_class()
dhash = self.dhash_from_dir(dir)
del res_index[base]
for blobname in res_data:
try:
dbfile = blob_index[blobname]
except KeyError:
blob_index_path = join(dhash, "blob_index")
self.db.corruption("LangZone.remove_path",
"'%s' blob not in '%s'"
% (blobname, blob_index_path),
"ignore")
continue
del blob_index[blobname]
            # Use a distinct loop var so we don't shadow the `path`
            # argument.
            for blobpath in glob(join(self.base_dir, dhash, dbfile+".*")):
                log.debug("fs-write: remove %s blob file '%s/%s'",
                          self.lang, dhash, basename(blobpath))
                os.remove(blobpath)
if is_hits_from_lpath_lang:
toplevelname_index.remove(base, res_data)
self.changed_index(dir, "res_index")
self.changed_index(dir, "blob_index")
if is_hits_from_lpath_lang:
self.changed_index(dir, "toplevelname_index")
finally:
self._release_lock()
# TODO Database.clean() should remove dirs that have no
# blob_index entries.
def remove_buf_data(self, buf):
"""Remove the given buffer from the database."""
self.remove_path(buf.path)
def update_buf_data(self, buf, scan_tree, scan_time, scan_error,
skip_scan_time_check=False):
"""Update this LangZone with the buffer data.
@param buf {CitadelBuffer} the buffer whose data is being added
to the database.
@param scan_tree {ciElementTree} the CIX scan data. Might be None
if there was an early scanning failure.
@param scan_time {timestamp} the time of the scan, typically the
mtime of the file
@param scan_error {str} an error string if scanning failed, or
            None if it was successful.
        @param skip_scan_time_check {boolean} (default False) whether
            the buffer data should be updated even if `scan_time` is <=
            the scan time already in the database.
"""
self._acquire_lock()
try:
# TODO: Canonicalize path (or assert that it is canonicalized)
dir, base = split(buf.path)
# Get the current data, if any.
res_index = self.load_index(dir, "res_index", {})
res_index_has_changed = False
blob_index = self.load_index(dir, "blob_index", {})
blob_index_has_changed = False
is_hits_from_lpath_lang = self.lang in self.db.import_everything_langs
if is_hits_from_lpath_lang:
# TODO: Not sure {} for a default is correct here.
toplevelname_index = self.load_index(
dir, "toplevelname_index", {})
toplevelname_index_has_changed = False
try:
(old_scan_time, old_scan_error, old_res_data) = res_index[base]
except KeyError: # adding a new entry
(old_scan_time, old_scan_error, old_res_data) = None, None, {}
else: # updating an existing entry
if not skip_scan_time_check and scan_time is not None \
and scan_time <= old_scan_time:
log.debug("skipping db update for '%s': %s < %s and "
"no 'skip_scan_time_check' option",
base, scan_time, old_scan_time)
return
log.debug("update from %s buf '%s'", buf.lang, buf.path)
# Parse the tree and get the list of blobnames.
# res_data: {blobname -> ilk -> toplevelnames}
new_res_data = {}
new_blobnames_and_blobs = []
if scan_tree:
for blob in scan_tree[0]:
lang = blob.get("lang")
assert blob.get("lang") == self.lang, "'%s' != '%s' (blob %r)" % (
blob.get("lang"), self.lang, blob)
blobname = blob.get("name")
toplevelnames_from_ilk = new_res_data.setdefault(