Skip to content

Commit 4f922d9

Browse files
committed
Rename token_idx_ funcs to simply token_ funcs
1 parent 711744d commit 4f922d9

7 files changed

Lines changed: 104 additions & 104 deletions

File tree

examples/column_defs_lowlevel.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -17,16 +17,16 @@ def extract_definitions(token_list):
1717
definitions = []
1818
tmp = []
1919
# grab the first token, ignoring whitespace. idx=1 to skip open (
20-
tidx, token = token_list.token_idx_next(1)
20+
tidx, token = token_list.token_next(1)
2121
while token and not token.match(sqlparse.tokens.Punctuation, ')'):
2222
tmp.append(token)
2323
# grab the next token, this time including whitespace
24-
tidx, token = token_list.token_idx_next(tidx, skip_ws=False)
24+
tidx, token = token_list.token_next(tidx, skip_ws=False)
2525
# split on ",", except when on end of statement
2626
if token and token.match(sqlparse.tokens.Punctuation, ','):
2727
definitions.append(tmp)
2828
tmp = []
29-
tidx, token = token_list.token_idx_next(tidx)
29+
tidx, token = token_list.token_next(tidx)
3030
if tmp and isinstance(tmp[0], sqlparse.sql.Identifier):
3131
definitions.append(tmp)
3232
return definitions
@@ -41,7 +41,7 @@ def extract_definitions(token_list):
4141
parsed = sqlparse.parse(SQL)[0]
4242

4343
# extract the parenthesis which holds column definitions
44-
_, par = parsed.token_idx_next_by(i=sqlparse.sql.Parenthesis)
44+
_, par = parsed.token_next_by(i=sqlparse.sql.Parenthesis)
4545
columns = extract_definitions(par)
4646

4747
for column in columns:

sqlparse/engine/grouping.py

Lines changed: 50 additions & 50 deletions
Original file line numberDiff line numberDiff line change
@@ -32,16 +32,16 @@ def _group_left_right(tlist, m, cls,
3232
continue
3333

3434
tidx = tlist.token_index(token)
35-
pidx, prev_ = tlist.token_idx_prev(tidx)
36-
nidx, next_ = tlist.token_idx_next(tidx)
35+
pidx, prev_ = tlist.token_prev(tidx)
36+
nidx, next_ = tlist.token_next(tidx)
3737

3838
if valid_left(prev_) and valid_right(next_):
3939
if semicolon:
4040
# only overwrite if a semicolon present.
41-
snidx, _ = tlist.token_idx_next_by(m=M_SEMICOLON, idx=nidx)
41+
snidx, _ = tlist.token_next_by(m=M_SEMICOLON, idx=nidx)
4242
nidx = snidx or nidx
4343
# Luckily, this leaves the position of `token` intact.
44-
tlist.group_tokens_between(cls, pidx, nidx, extend=True)
44+
tlist.group_tokens(cls, pidx, nidx, extend=True)
4545

4646

4747
def _group_matching(tlist, cls):
@@ -64,7 +64,7 @@ def _group_matching(tlist, cls):
6464
# this indicates invalid sql and unbalanced tokens.
6565
# instead of break, continue in case other "valid" groups exist
6666
continue
67-
tlist.group_tokens_between(cls, open_token, token)
67+
tlist.group_tokens(cls, open_token, token)
6868

6969

7070
def group_if(tlist):
@@ -115,10 +115,10 @@ def group_case(tlist):
115115
def group_identifier(tlist):
116116
T_IDENT = (T.String.Symbol, T.Name)
117117

118-
tidx, token = tlist.token_idx_next_by(t=T_IDENT)
118+
tidx, token = tlist.token_next_by(t=T_IDENT)
119119
while token:
120-
tlist.group_tokens_between(sql.Identifier, tidx, tidx)
121-
tidx, token = tlist.token_idx_next_by(t=T_IDENT, idx=tidx + 1)
120+
tlist.group_tokens(sql.Identifier, tidx, tidx)
121+
tidx, token = tlist.token_next_by(t=T_IDENT, idx=tidx + 1)
122122

123123

124124
def group_period(tlist):
@@ -133,14 +133,14 @@ def group_period(tlist):
133133

134134

135135
def group_arrays(tlist):
136-
tidx, token = tlist.token_idx_next_by(i=sql.SquareBrackets)
136+
tidx, token = tlist.token_next_by(i=sql.SquareBrackets)
137137
while token:
138-
pidx, prev = tlist.token_idx_prev(tidx)
139-
if imt(prev, i=(sql.SquareBrackets, sql.Identifier, sql.Function),
138+
pidx, prev_ = tlist.token_prev(tidx)
139+
if imt(prev_, i=(sql.SquareBrackets, sql.Identifier, sql.Function),
140140
t=(T.Name, T.String.Symbol,)):
141-
tlist.group_tokens_between(sql.Identifier, pidx, tidx, extend=True)
141+
tlist.group_tokens(sql.Identifier, pidx, tidx, extend=True)
142142
tidx = pidx
143-
tidx, token = tlist.token_idx_next_by(i=sql.SquareBrackets, idx=tidx + 1)
143+
tidx, token = tlist.token_next_by(i=sql.SquareBrackets, idx=tidx + 1)
144144

145145

146146
@recurse(sql.Identifier)
@@ -151,18 +151,18 @@ def group_operator(tlist):
151151
T_CYCLE = T_NUMERICAL + T_STRING + T_NAME
152152
func = lambda tk: imt(tk, i=I_CYCLE, t=T_CYCLE)
153153

154-
tidx, token = tlist.token_idx_next_by(t=(T.Operator, T.Wildcard))
154+
tidx, token = tlist.token_next_by(t=(T.Operator, T.Wildcard))
155155
while token:
156-
pidx, prev_ = tlist.token_idx_prev(tidx)
157-
nidx, next_ = tlist.token_idx_next(tidx)
156+
pidx, prev_ = tlist.token_prev(tidx)
157+
nidx, next_ = tlist.token_next(tidx)
158158

159159
if func(prev_) and func(next_):
160160
token.ttype = T.Operator
161-
tlist.group_tokens_between(sql.Operation, pidx, nidx)
161+
tlist.group_tokens(sql.Operation, pidx, nidx)
162162
tidx = pidx
163163

164-
tidx, token = tlist.token_idx_next_by(t=(T.Operator, T.Wildcard),
165-
idx=tidx + 1)
164+
tidx, token = tlist.token_next_by(t=(T.Operator, T.Wildcard),
165+
idx=tidx + 1)
166166

167167

168168
@recurse(sql.IdentifierList)
@@ -174,15 +174,15 @@ def group_identifier_list(tlist):
174174

175175
func = lambda t: imt(t, i=I_IDENT_LIST, m=M_ROLE, t=T_IDENT_LIST)
176176

177-
tidx, token = tlist.token_idx_next_by(m=M_COMMA)
177+
tidx, token = tlist.token_next_by(m=M_COMMA)
178178
while token:
179-
pidx, prev_ = tlist.token_idx_prev(tidx)
180-
nidx, next_ = tlist.token_idx_next(tidx)
179+
pidx, prev_ = tlist.token_prev(tidx)
180+
nidx, next_ = tlist.token_next(tidx)
181181

182182
if func(prev_) and func(next_):
183-
tlist.group_tokens_between(sql.IdentifierList, pidx, nidx, extend=True)
183+
tlist.group_tokens(sql.IdentifierList, pidx, nidx, extend=True)
184184
tidx = pidx
185-
tidx, token = tlist.token_idx_next_by(m=M_COMMA, idx=tidx + 1)
185+
tidx, token = tlist.token_next_by(m=M_COMMA, idx=tidx + 1)
186186

187187

188188
def group_brackets(tlist):
@@ -195,45 +195,45 @@ def group_parenthesis(tlist):
195195

196196
@recurse(sql.Comment)
197197
def group_comments(tlist):
198-
tidx, token = tlist.token_idx_next_by(t=T.Comment)
198+
tidx, token = tlist.token_next_by(t=T.Comment)
199199
while token:
200200
end = tlist.token_not_matching(
201201
lambda tk: imt(tk, t=T.Comment) or tk.is_whitespace(), idx=tidx + 1)
202202
if end is not None:
203203
eidx = tlist.token_index(end)
204-
eidx, end = tlist.token_idx_prev(eidx, skip_ws=False)
205-
tlist.group_tokens_between(sql.Comment, tidx, eidx)
204+
eidx, end = tlist.token_prev(eidx, skip_ws=False)
205+
tlist.group_tokens(sql.Comment, tidx, eidx)
206206

207-
tidx, token = tlist.token_idx_next_by(t=T.Comment, idx=tidx + 1)
207+
tidx, token = tlist.token_next_by(t=T.Comment, idx=tidx + 1)
208208

209209

210210
@recurse(sql.Where)
211211
def group_where(tlist):
212-
tidx, token = tlist.token_idx_next_by(m=sql.Where.M_OPEN)
212+
tidx, token = tlist.token_next_by(m=sql.Where.M_OPEN)
213213
while token:
214-
eidx, end = tlist.token_idx_next_by(m=sql.Where.M_CLOSE, idx=tidx + 1)
214+
eidx, end = tlist.token_next_by(m=sql.Where.M_CLOSE, idx=tidx + 1)
215215

216216
if end is None:
217217
end = tlist._groupable_tokens[-1]
218218
else:
219219
end = tlist.tokens[eidx - 1]
220220
# TODO: convert this to eidx instead of end token.
221221
# i think above values are len(tlist) and eidx-1
222-
tlist.group_tokens_between(sql.Where, tidx, end)
223-
tidx, token = tlist.token_idx_next_by(m=sql.Where.M_OPEN, idx=tidx + 1)
222+
tlist.group_tokens(sql.Where, tidx, end)
223+
tidx, token = tlist.token_next_by(m=sql.Where.M_OPEN, idx=tidx + 1)
224224

225225

226226
@recurse()
227227
def group_aliased(tlist):
228228
I_ALIAS = (sql.Parenthesis, sql.Function, sql.Case, sql.Identifier,
229229
sql.Operation)
230230

231-
tidx, token = tlist.token_idx_next_by(i=I_ALIAS, t=T.Number)
231+
tidx, token = tlist.token_next_by(i=I_ALIAS, t=T.Number)
232232
while token:
233-
nidx, next_ = tlist.token_idx_next(tidx)
233+
nidx, next_ = tlist.token_next(tidx)
234234
if imt(next_, i=sql.Identifier):
235-
tlist.group_tokens_between(sql.Identifier, tidx, nidx, extend=True)
236-
tidx, token = tlist.token_idx_next_by(i=I_ALIAS, t=T.Number, idx=tidx + 1)
235+
tlist.group_tokens(sql.Identifier, tidx, nidx, extend=True)
236+
tidx, token = tlist.token_next_by(i=I_ALIAS, t=T.Number, idx=tidx + 1)
237237

238238

239239
def group_typecasts(tlist):
@@ -252,34 +252,34 @@ def group_functions(tlist):
252252
if has_create and has_table:
253253
return
254254

255-
tidx, token = tlist.token_idx_next_by(t=T.Name)
255+
tidx, token = tlist.token_next_by(t=T.Name)
256256
while token:
257-
nidx, next_ = tlist.token_idx_next(tidx)
257+
nidx, next_ = tlist.token_next(tidx)
258258
if isinstance(next_, sql.Parenthesis):
259-
tlist.group_tokens_between(sql.Function, tidx, nidx)
260-
tidx, token = tlist.token_idx_next_by(t=T.Name, idx=tidx + 1)
259+
tlist.group_tokens(sql.Function, tidx, nidx)
260+
tidx, token = tlist.token_next_by(t=T.Name, idx=tidx + 1)
261261

262262

263263
def group_order(tlist):
264264
"""Group together Identifier and Asc/Desc token"""
265-
tidx, token = tlist.token_idx_next_by(t=T.Keyword.Order)
265+
tidx, token = tlist.token_next_by(t=T.Keyword.Order)
266266
while token:
267-
pidx, prev = tlist.token_idx_prev(tidx)
268-
if imt(prev, i=sql.Identifier, t=T.Number):
269-
tlist.group_tokens_between(sql.Identifier, pidx, tidx)
267+
pidx, prev_ = tlist.token_prev(tidx)
268+
if imt(prev_, i=sql.Identifier, t=T.Number):
269+
tlist.group_tokens(sql.Identifier, pidx, tidx)
270270
tidx = pidx
271-
tidx, token = tlist.token_idx_next_by(t=T.Keyword.Order, idx=tidx + 1)
271+
tidx, token = tlist.token_next_by(t=T.Keyword.Order, idx=tidx + 1)
272272

273273

274274
@recurse()
275275
def align_comments(tlist):
276-
tidx, token = tlist.token_idx_next_by(i=sql.Comment)
276+
tidx, token = tlist.token_next_by(i=sql.Comment)
277277
while token:
278-
pidx, prev = tlist.token_idx_prev(tidx)
279-
if isinstance(prev, sql.TokenList):
280-
tlist.group_tokens_between(sql.TokenList, pidx, tidx, extend=True)
278+
pidx, prev_ = tlist.token_prev(tidx)
279+
if isinstance(prev_, sql.TokenList):
280+
tlist.group_tokens(sql.TokenList, pidx, tidx, extend=True)
281281
tidx = pidx
282-
tidx, token = tlist.token_idx_next_by(i=sql.Comment, idx=tidx + 1)
282+
tidx, token = tlist.token_next_by(i=sql.Comment, idx=tidx + 1)
283283

284284

285285
def group(stmt):

sqlparse/filters/aligned_indent.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,7 @@ def _process_statement(self, tlist):
4646

4747
def _process_parenthesis(self, tlist):
4848
# if this isn't a subquery, don't re-indent
49-
_, token = tlist.token_idx_next_by(m=(T.DML, 'SELECT'))
49+
_, token = tlist.token_next_by(m=(T.DML, 'SELECT'))
5050
if token is not None:
5151
with indent(self):
5252
tlist.insert_after(tlist[0], self.nl('SELECT'))
@@ -67,7 +67,7 @@ def _process_case(self, tlist):
6767
offset_ = len('case ') + len('when ')
6868
cases = tlist.get_cases(skip_ws=True)
6969
# align the end as well
70-
_, end_token = tlist.token_idx_next_by(m=(T.Keyword, 'END'))
70+
_, end_token = tlist.token_next_by(m=(T.Keyword, 'END'))
7171
cases.append((None, [end_token]))
7272

7373
condition_width = [len(' '.join(map(text_type, cond))) if cond else 0
@@ -88,7 +88,7 @@ def _process_case(self, tlist):
8888

8989
def _next_token(self, tlist, idx=0):
9090
split_words = T.Keyword, self.split_words, True
91-
tidx, token = tlist.token_idx_next_by(m=split_words, idx=idx)
91+
tidx, token = tlist.token_next_by(m=split_words, idx=idx)
9292
# treat "BETWEEN x and y" as a single statement
9393
if token and token.normalized == 'BETWEEN':
9494
tidx, token = self._next_token(tlist, tidx + 1)
@@ -113,9 +113,9 @@ def _process_default(self, tlist):
113113
# process any sub-sub statements
114114
for sgroup in tlist.get_sublists():
115115
idx = tlist.token_index(sgroup)
116-
pidx, prev = tlist.token_idx_prev(idx)
116+
pidx, prev_ = tlist.token_prev(idx)
117117
# HACK: make "group/order by" work. Longer than max_len.
118-
offset_ = 3 if (prev and prev.match(T.Keyword, 'BY')) else 0
118+
offset_ = 3 if (prev_ and prev_.match(T.Keyword, 'BY')) else 0
119119
with offset(self, offset_):
120120
self._process(sgroup)
121121

sqlparse/filters/others.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -14,12 +14,12 @@ class StripCommentsFilter(object):
1414
def _process(tlist):
1515
def get_next_comment():
1616
# TODO(andi) Comment types should be unified, see related issue38
17-
return tlist.token_idx_next_by(i=sql.Comment, t=T.Comment)
17+
return tlist.token_next_by(i=sql.Comment, t=T.Comment)
1818

1919
tidx, token = get_next_comment()
2020
while token:
21-
pidx, prev_ = tlist.token_idx_prev(tidx, skip_ws=False)
22-
nidx, next_ = tlist.token_idx_next(tidx, skip_ws=False)
21+
pidx, prev_ = tlist.token_prev(tidx, skip_ws=False)
22+
nidx, next_ = tlist.token_next(tidx, skip_ws=False)
2323
# Replace by whitespace if prev and next exist and if they're not
2424
# whitespaces. This doesn't apply if prev or next is a parenthesis.
2525
if (prev_ is None or next_ is None or
@@ -87,19 +87,19 @@ class SpacesAroundOperatorsFilter(object):
8787
def _process(tlist):
8888

8989
ttypes = (T.Operator, T.Comparison)
90-
tidx, token = tlist.token_idx_next_by(t=ttypes)
90+
tidx, token = tlist.token_next_by(t=ttypes)
9191
while token:
92-
nidx, next_ = tlist.token_idx_next(tidx, skip_ws=False)
92+
nidx, next_ = tlist.token_next(tidx, skip_ws=False)
9393
if next_ and next_.ttype != T.Whitespace:
9494
tlist.insert_after(tidx, sql.Token(T.Whitespace, ' '))
9595

96-
pidx, prev_ = tlist.token_idx_prev(tidx, skip_ws=False)
96+
pidx, prev_ = tlist.token_prev(tidx, skip_ws=False)
9797
if prev_ and prev_.ttype != T.Whitespace:
9898
tlist.insert_before(tidx, sql.Token(T.Whitespace, ' '))
9999
tidx += 1 # has to shift since token inserted before it
100100

101101
# assert tlist.token_index(token) == tidx
102-
tidx, token = tlist.token_idx_next_by(t=ttypes, idx=tidx + 1)
102+
tidx, token = tlist.token_next_by(t=ttypes, idx=tidx + 1)
103103

104104
def process(self, stmt):
105105
[self.process(sgroup) for sgroup in stmt.get_sublists()]

sqlparse/filters/reindent.py

Lines changed: 13 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -49,7 +49,7 @@ def _next_token(self, tlist, idx=0):
4949
'GROUP', 'ORDER', 'UNION', 'VALUES',
5050
'SET', 'BETWEEN', 'EXCEPT', 'HAVING')
5151
m_split = T.Keyword, split_words, True
52-
tidx, token = tlist.token_idx_next_by(m=m_split, idx=idx)
52+
tidx, token = tlist.token_next_by(m=m_split, idx=idx)
5353

5454
if token and token.normalized == 'BETWEEN':
5555
tidx, token = self._next_token(tlist, tidx + 1)
@@ -63,10 +63,10 @@ def _split_kwds(self, tlist):
6363
tidx, token = self._next_token(tlist)
6464
while token:
6565
tidx = tlist.token_index(token)
66-
pidx, prev = tlist.token_idx_prev(tidx, skip_ws=False)
67-
uprev = text_type(prev)
66+
pidx, prev_ = tlist.token_prev(tidx, skip_ws=False)
67+
uprev = text_type(prev_)
6868

69-
if prev and prev.is_whitespace():
69+
if prev_ and prev_.is_whitespace():
7070
del tlist.tokens[pidx]
7171
tidx -= 1
7272

@@ -77,17 +77,17 @@ def _split_kwds(self, tlist):
7777
tidx, token = self._next_token(tlist, tidx + 1)
7878

7979
def _split_statements(self, tlist):
80-
tidx, token = tlist.token_idx_next_by(t=(T.Keyword.DDL, T.Keyword.DML))
80+
tidx, token = tlist.token_next_by(t=(T.Keyword.DDL, T.Keyword.DML))
8181
while token:
82-
pidx, prev = tlist.token_idx_prev(tidx, skip_ws=False)
83-
if prev and prev.is_whitespace():
82+
pidx, prev_ = tlist.token_prev(tidx, skip_ws=False)
83+
if prev_ and prev_.is_whitespace():
8484
del tlist.tokens[pidx]
8585
tidx -= 1
8686
# only break if it's not the first token
87-
if prev:
87+
if prev_:
8888
tlist.insert_before(tidx, self.nl())
8989
tidx += 1
90-
tidx, token = tlist.token_idx_next_by(
90+
tidx, token = tlist.token_next_by(
9191
t=(T.Keyword.DDL, T.Keyword.DML), idx=tidx + 1)
9292

9393
def _process(self, tlist):
@@ -96,7 +96,7 @@ def _process(self, tlist):
9696
func(tlist)
9797

9898
def _process_where(self, tlist):
99-
tidx, token = tlist.token_idx_next_by(m=(T.Keyword, 'WHERE'))
99+
tidx, token = tlist.token_next_by(m=(T.Keyword, 'WHERE'))
100100
# issue121, errors in statement fixed??
101101
tlist.insert_before(tidx, self.nl())
102102

@@ -105,8 +105,8 @@ def _process_where(self, tlist):
105105

106106
def _process_parenthesis(self, tlist):
107107
ttypes = T.Keyword.DML, T.Keyword.DDL
108-
_, is_dml_dll = tlist.token_idx_next_by(t=ttypes)
109-
fidx, first = tlist.token_idx_next_by(m=sql.Parenthesis.M_OPEN)
108+
_, is_dml_dll = tlist.token_next_by(t=ttypes)
109+
fidx, first = tlist.token_next_by(m=sql.Parenthesis.M_OPEN)
110110

111111
with indent(self, 1 if is_dml_dll else 0):
112112
tlist.tokens.insert(0, self.nl()) if is_dml_dll else None
@@ -143,7 +143,7 @@ def _process_case(self, tlist):
143143
# len "when ", "then ", "else "
144144
with offset(self, len("WHEN ")):
145145
self._process_default(tlist)
146-
end_idx, end = tlist.token_idx_next_by(m=sql.Case.M_CLOSE)
146+
end_idx, end = tlist.token_next_by(m=sql.Case.M_CLOSE)
147147
tlist.insert_before(end_idx, self.nl())
148148

149149
def _process_default(self, tlist, stmts=True):

0 commit comments

Comments
 (0)