tabnanny.py 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329
#! /usr/bin/env python
"""The Tab Nanny despises ambiguous indentation.  She knows no mercy.

tabnanny -- Detection of ambiguous indentation

For the time being this module is intended to be called as a script.
However it is possible to import it into an IDE and use the function
check() described below.

Warning: The API provided by this module is likely to change in future
releases; such changes may not be backward compatible.
"""

# Released to the public domain, by Tim Peters, 15 April 1998.

# XXX Note: this is now a standard library module.
# XXX The API needs to undergo changes however; the current code is too
# XXX script-like.  This will be addressed later.

__version__ = "6"

import os
import sys
import getopt
import tokenize

# Guard against a pre-NL tokenize module: process_tokens() below relies on
# tokenize.NL to classify non-logical newlines as JUNK.
if not hasattr(tokenize, 'NL'):
    raise ValueError("tokenize.NL doesn't exist -- tokenize module too old")

__all__ = ["check", "NannyNag", "process_tokens"]

# Command-line state, bumped by -v / -q in main().
verbose = 0        # >0: chatty progress output; >1: per-file "checking" lines
filename_only = 0  # nonzero: report only the offending file name
  24. def errprint(*args):
  25. sep = ""
  26. for arg in args:
  27. sys.stderr.write(sep + str(arg))
  28. sep = " "
  29. sys.stderr.write("\n")
  30. def main():
  31. global verbose, filename_only
  32. try:
  33. opts, args = getopt.getopt(sys.argv[1:], "qv")
  34. except getopt.error, msg:
  35. errprint(msg)
  36. return
  37. for o, a in opts:
  38. if o == '-q':
  39. filename_only = filename_only + 1
  40. if o == '-v':
  41. verbose = verbose + 1
  42. if not args:
  43. errprint("Usage:", sys.argv[0], "[-v] file_or_directory ...")
  44. return
  45. for arg in args:
  46. check(arg)
  47. class NannyNag(Exception):
  48. """
  49. Raised by tokeneater() if detecting an ambiguous indent.
  50. Captured and handled in check().
  51. """
  52. def __init__(self, lineno, msg, line):
  53. self.lineno, self.msg, self.line = lineno, msg, line
  54. def get_lineno(self):
  55. return self.lineno
  56. def get_msg(self):
  57. return self.msg
  58. def get_line(self):
  59. return self.line
  60. def check(file):
  61. """check(file_or_dir)
  62. If file_or_dir is a directory and not a symbolic link, then recursively
  63. descend the directory tree named by file_or_dir, checking all .py files
  64. along the way. If file_or_dir is an ordinary Python source file, it is
  65. checked for whitespace related problems. The diagnostic messages are
  66. written to standard output using the print statement.
  67. """
  68. if os.path.isdir(file) and not os.path.islink(file):
  69. if verbose:
  70. print "%r: listing directory" % (file,)
  71. names = os.listdir(file)
  72. for name in names:
  73. fullname = os.path.join(file, name)
  74. if (os.path.isdir(fullname) and
  75. not os.path.islink(fullname) or
  76. os.path.normcase(name[-3:]) == ".py"):
  77. check(fullname)
  78. return
  79. try:
  80. f = open(file)
  81. except IOError, msg:
  82. errprint("%r: I/O Error: %s" % (file, msg))
  83. return
  84. if verbose > 1:
  85. print "checking %r ..." % file
  86. try:
  87. process_tokens(tokenize.generate_tokens(f.readline))
  88. except tokenize.TokenError, msg:
  89. errprint("%r: Token Error: %s" % (file, msg))
  90. return
  91. except IndentationError, msg:
  92. errprint("%r: Indentation Error: %s" % (file, msg))
  93. return
  94. except NannyNag, nag:
  95. badline = nag.get_lineno()
  96. line = nag.get_line()
  97. if verbose:
  98. print "%r: *** Line %d: trouble in tab city! ***" % (file, badline)
  99. print "offending line: %r" % (line,)
  100. print nag.get_msg()
  101. else:
  102. if ' ' in file: file = '"' + file + '"'
  103. if filename_only: print file
  104. else: print file, badline, repr(line)
  105. return
  106. if verbose:
  107. print "%r: Clean bill of health." % (file,)
  108. class Whitespace:
  109. # the characters used for space and tab
  110. S, T = ' \t'
  111. # members:
  112. # raw
  113. # the original string
  114. # n
  115. # the number of leading whitespace characters in raw
  116. # nt
  117. # the number of tabs in raw[:n]
  118. # norm
  119. # the normal form as a pair (count, trailing), where:
  120. # count
  121. # a tuple such that raw[:n] contains count[i]
  122. # instances of S * i + T
  123. # trailing
  124. # the number of trailing spaces in raw[:n]
  125. # It's A Theorem that m.indent_level(t) ==
  126. # n.indent_level(t) for all t >= 1 iff m.norm == n.norm.
  127. # is_simple
  128. # true iff raw[:n] is of the form (T*)(S*)
  129. def __init__(self, ws):
  130. self.raw = ws
  131. S, T = Whitespace.S, Whitespace.T
  132. count = []
  133. b = n = nt = 0
  134. for ch in self.raw:
  135. if ch == S:
  136. n = n + 1
  137. b = b + 1
  138. elif ch == T:
  139. n = n + 1
  140. nt = nt + 1
  141. if b >= len(count):
  142. count = count + [0] * (b - len(count) + 1)
  143. count[b] = count[b] + 1
  144. b = 0
  145. else:
  146. break
  147. self.n = n
  148. self.nt = nt
  149. self.norm = tuple(count), b
  150. self.is_simple = len(count) <= 1
  151. # return length of longest contiguous run of spaces (whether or not
  152. # preceding a tab)
  153. def longest_run_of_spaces(self):
  154. count, trailing = self.norm
  155. return max(len(count)-1, trailing)
  156. def indent_level(self, tabsize):
  157. # count, il = self.norm
  158. # for i in range(len(count)):
  159. # if count[i]:
  160. # il = il + (i/tabsize + 1)*tabsize * count[i]
  161. # return il
  162. # quicker:
  163. # il = trailing + sum (i/ts + 1)*ts*count[i] =
  164. # trailing + ts * sum (i/ts + 1)*count[i] =
  165. # trailing + ts * sum i/ts*count[i] + count[i] =
  166. # trailing + ts * [(sum i/ts*count[i]) + (sum count[i])] =
  167. # trailing + ts * [(sum i/ts*count[i]) + num_tabs]
  168. # and note that i/ts*count[i] is 0 when i < ts
  169. count, trailing = self.norm
  170. il = 0
  171. for i in range(tabsize, len(count)):
  172. il = il + i/tabsize * count[i]
  173. return trailing + tabsize * (il + self.nt)
  174. # return true iff self.indent_level(t) == other.indent_level(t)
  175. # for all t >= 1
  176. def equal(self, other):
  177. return self.norm == other.norm
  178. # return a list of tuples (ts, i1, i2) such that
  179. # i1 == self.indent_level(ts) != other.indent_level(ts) == i2.
  180. # Intended to be used after not self.equal(other) is known, in which
  181. # case it will return at least one witnessing tab size.
  182. def not_equal_witness(self, other):
  183. n = max(self.longest_run_of_spaces(),
  184. other.longest_run_of_spaces()) + 1
  185. a = []
  186. for ts in range(1, n+1):
  187. if self.indent_level(ts) != other.indent_level(ts):
  188. a.append( (ts,
  189. self.indent_level(ts),
  190. other.indent_level(ts)) )
  191. return a
  192. # Return True iff self.indent_level(t) < other.indent_level(t)
  193. # for all t >= 1.
  194. # The algorithm is due to Vincent Broman.
  195. # Easy to prove it's correct.
  196. # XXXpost that.
  197. # Trivial to prove n is sharp (consider T vs ST).
  198. # Unknown whether there's a faster general way. I suspected so at
  199. # first, but no longer.
  200. # For the special (but common!) case where M and N are both of the
  201. # form (T*)(S*), M.less(N) iff M.len() < N.len() and
  202. # M.num_tabs() <= N.num_tabs(). Proof is easy but kinda long-winded.
  203. # XXXwrite that up.
  204. # Note that M is of the form (T*)(S*) iff len(M.norm[0]) <= 1.
  205. def less(self, other):
  206. if self.n >= other.n:
  207. return False
  208. if self.is_simple and other.is_simple:
  209. return self.nt <= other.nt
  210. n = max(self.longest_run_of_spaces(),
  211. other.longest_run_of_spaces()) + 1
  212. # the self.n >= other.n test already did it for ts=1
  213. for ts in range(2, n+1):
  214. if self.indent_level(ts) >= other.indent_level(ts):
  215. return False
  216. return True
  217. # return a list of tuples (ts, i1, i2) such that
  218. # i1 == self.indent_level(ts) >= other.indent_level(ts) == i2.
  219. # Intended to be used after not self.less(other) is known, in which
  220. # case it will return at least one witnessing tab size.
  221. def not_less_witness(self, other):
  222. n = max(self.longest_run_of_spaces(),
  223. other.longest_run_of_spaces()) + 1
  224. a = []
  225. for ts in range(1, n+1):
  226. if self.indent_level(ts) >= other.indent_level(ts):
  227. a.append( (ts,
  228. self.indent_level(ts),
  229. other.indent_level(ts)) )
  230. return a
  231. def format_witnesses(w):
  232. firsts = map(lambda tup: str(tup[0]), w)
  233. prefix = "at tab size"
  234. if len(w) > 1:
  235. prefix = prefix + "s"
  236. return prefix + " " + ', '.join(firsts)
def process_tokens(tokens):
    """Check a tokenize token stream for ambiguous indentation.

    Maintains a stack of Whitespace objects, one per open indentation
    level, and raises NannyNag at the first statement whose leading
    whitespace is ambiguous (i.e. compares differently against the
    enclosing level under some tab size).  Returns None on clean input.
    """
    INDENT = tokenize.INDENT
    DEDENT = tokenize.DEDENT
    NEWLINE = tokenize.NEWLINE
    JUNK = tokenize.COMMENT, tokenize.NL
    # Stack seeded with the empty indent; matches ENDMARKER's empty line.
    indents = [Whitespace("")]
    # When true, the next "real" token's line must equal indents[-1].
    check_equal = 0

    for (type, token, start, end, line) in tokens:
        if type == NEWLINE:
            # a program statement, or ENDMARKER, will eventually follow,
            # after some (possibly empty) run of tokens of the form
            #     (NL | COMMENT)* (INDENT | DEDENT+)?
            # If an INDENT appears, setting check_equal is wrong, and will
            # be undone when we see the INDENT.
            check_equal = 1

        elif type == INDENT:
            check_equal = 0
            thisguy = Whitespace(token)
            # A new level must be strictly deeper under *every* tab size.
            if not indents[-1].less(thisguy):
                witness = indents[-1].not_less_witness(thisguy)
                msg = "indent not greater e.g. " + format_witnesses(witness)
                raise NannyNag(start[0], msg, line)
            indents.append(thisguy)

        elif type == DEDENT:
            # there's nothing we need to check here!  what's important is
            # that when the run of DEDENTs ends, the indentation of the
            # program statement (or ENDMARKER) that triggered the run is
            # equal to what's left at the top of the indents stack

            # Ouch!  This assert triggers if the last line of the source
            # is indented *and* lacks a newline -- then DEDENTs pop out
            # of thin air.
            # assert check_equal  # else no earlier NEWLINE, or an earlier INDENT
            check_equal = 1

            del indents[-1]

        elif check_equal and type not in JUNK:
            # this is the first "real token" following a NEWLINE, so it
            # must be the first token of the next program statement, or an
            # ENDMARKER; the "line" argument exposes the leading whitespace
            # for this statement; in the case of ENDMARKER, line is an empty
            # string, so will properly match the empty string with which the
            # "indents" stack was seeded
            check_equal = 0
            thisguy = Whitespace(line)
            if not indents[-1].equal(thisguy):
                witness = indents[-1].not_equal_witness(thisguy)
                msg = "indent not equal e.g. " + format_witnesses(witness)
                raise NannyNag(start[0], msg, line)
# Allow use both as a script and as an importable module.
if __name__ == '__main__':
    main()