
Commit c4c660c

Sam Ruby's patch to use unittest rather than nose as the test framework, and perform a few other bits of cleanup. Patch to documentation coming up.
--HG-- extra : convert_revision : svn%3Aacbfec75-9323-0410-a652-858a13e371e0/trunk%40119
1 parent 28435fc commit c4c660c

1 file changed

Lines changed: 26 additions & 24 deletions


tests/test_tokenizer.py

@@ -1,11 +1,15 @@
 import sys
+import os
 import glob
 import StringIO
+import unittest
+import new
 
 import simplejson
 
 #Allow us to import the parent module
-sys.path.insert(0, "../")
+os.chdir(os.path.split(os.path.abspath(__file__))[0])
+sys.path.insert(0, os.path.abspath(os.pardir))
 
 import tokenizer
 
@@ -46,7 +50,7 @@ def atheistParseError(self):
         """This error is not an error"""
         self.outputTokens.append(u"AtheistParseError")
 
-def concatanateCharacterTokens(tokens):
+def concatenateCharacterTokens(tokens):
     outputTokens = []
     for token in tokens:
         if not "ParseError" in token and token[0] == "Character":
@@ -63,7 +67,7 @@ def tokensMatch(expectedTokens, recievedTokens):
     """Test whether the test has passed or failed
 
     For brevity in the tests, the test has passed if the sequence of expected
-    tokens appears anywhere in the sequqnce of returned tokens.
+    tokens appears anywhere in the sequence of returned tokens.
     """
     return expectedTokens == recievedTokens
     for i, token in enumerate(recievedTokens):
@@ -74,37 +78,35 @@ def tokensMatch(expectedTokens, recievedTokens):
             return False
 
 
+class TestCase(unittest.TestCase):
+    def runTokenizerTest(self, input, output):
+        #XXX - move this out into the setup function
+        #concatenate all consecutive character tokens into a single token
+        output = concatenateCharacterTokens(output)
+        parser = TokenizerTestParser()
+        tokens = parser.parse(StringIO.StringIO(input))
+        tokens = concatenateCharacterTokens(tokens)
+        self.assertTrue(tokensMatch(tokens, output))
+
 def test_tokenizer():
     for filename in glob.glob('tokenizer/*.test'):
         tests = simplejson.load(file(filename))
         for test in tests['tests']:
-            yield (runTokenizerTest, test['description'], test['input'],
-                   test['output'])
-
-def runTokenizerTest(description, input, output):
-    #XXX - move this out into the setup function
-    #concatanate all consecutive character tokens into a single token
-    output = concatanateCharacterTokens(output)
-    parser = TokenizerTestParser()
-    tokens = parser.parse(StringIO.StringIO(input))
-    tokens = concatanateCharacterTokens(tokens)
-    print "Got", tokens, "expected", output
-    assert tokensMatch(tokens, output)
+            yield (TestCase.runTokenizerTest, test['description'],
+                   test['input'], test['output'])
 
 def main():
     failed = 0
     tests = 0
     for func, desc, input, output in test_tokenizer():
         tests += 1
-        try:
-            func(desc, input, output)
-        except AssertionError:
-            print "Failed test %s"%(desc,)
-            parser = TokenizerTestParser()
-            tokens = parser.parse(StringIO.StringIO(input))
-            print "Got", tokens, "expected", output
-            failed +=1
-    print "Ran %i tests, failed %i"%(tests, failed)
+        testName = 'test%d' % tests
+        testFunc = lambda self, method=func, input=input, output=output: \
+            method(self, input, output)
+        testFunc.__doc__ = desc
+        instanceMethod = new.instancemethod(testFunc, None, TestCase)
+        setattr(TestCase, testName, instanceMethod)
+    unittest.main()
 
 if __name__ == "__main__":
     main()
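For readers who have not seen nose's generator tests converted to unittest before, the sketch below shows the registration trick the new main() uses, in isolation: one test method per data case is manufactured at run time and attached to a TestCase subclass before unittest.main() takes over. Everything in it (the cases list, runCase, the toy uppercasing check) is a hypothetical stand-in for this repository's TokenizerTestParser/tokensMatch machinery, not code from the patch.

# Minimal self-contained sketch of the pattern (Python 2, matching the file).
# The cases and names below are made-up placeholders, not part of the suite.
import new
import unittest

class TestCase(unittest.TestCase):
    def runCase(self, input, expected):
        #the real suite calls runTokenizerTest here instead
        self.assertEqual(input.upper(), expected)

#hypothetical stand-in for the (description, input, output) tuples that
#test_tokenizer() yields from the JSON test files
cases = [("upper-cases ascii", "abc", "ABC"),
         ("leaves digits alone", "123", "123")]

def main():
    for i, (desc, input, expected) in enumerate(cases):
        #default arguments freeze the current loop values; without them the
        #lambda would late-bind and every generated test would run the last case
        testFunc = lambda self, input=input, expected=expected: \
            self.runCase(input, expected)
        testFunc.__doc__ = desc
        #new.instancemethod turns the plain function into an unbound method
        #(on Python 2 a bare setattr of the function would also work)
        setattr(TestCase, 'test%d' % i, new.instancemethod(testFunc, None, TestCase))
    unittest.main()

if __name__ == "__main__":
    main()

Two details of the actual patch are worth noting: the default arguments on the lambda are what bind each iteration's func/input/output, and the new os.chdir/sys.path lines at the top of the file appear to be what keep glob.glob('tokenizer/*.test') and import tokenizer working regardless of which directory the script is launched from.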
