Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 4 additions & 4 deletions CONTRIBUTING.md
Original file line number Diff line number Diff line change
Expand Up @@ -74,21 +74,21 @@ Running the Test-Suite
=====================

The minimal requirement for running the testsuite is ``py.test``. You can
install it with::
install it with:

pip install pytest

Clone this repository::
Clone this repository:

git clone https://github.com/aimacode/aima-python.git

Fetch the aima-data submodule::
Fetch the aima-data submodule:

cd aima-python
git submodule init
git submodule update

Then you can run the testsuite with::
Then you can run the testsuite from the `tests` directory with:

py.test

Expand Down
6 changes: 3 additions & 3 deletions learning.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
removeall, unique, product, mode, argmax, argmax_random_tie, isclose, gaussian,
dotproduct, vector_add, scalar_vector_product, weighted_sample_with_replacement,
weighted_sampler, num_or_str, normalize, clip, sigmoid, print_table,
DataFile, sigmoid_derivative
open_data, sigmoid_derivative
)

import copy
Expand Down Expand Up @@ -95,7 +95,7 @@ def __init__(self, examples=None, attrs=None, attrnames=None, target=-1,
if isinstance(examples, str):
self.examples = parse_csv(examples)
elif examples is None:
self.examples = parse_csv(DataFile(name + '.csv').read())
self.examples = parse_csv(open_data(name + '.csv').read())
else:
self.examples = examples
# Attrs are the indices of examples, unless otherwise stated.
Expand Down Expand Up @@ -949,7 +949,7 @@ def cross_validation_wrapper(learner, dataset, k=10, trials=1):
err_val = []
err_train = []
size = 1

while True:
errT, errV = cross_validation(learner, size, dataset, k)
# Check for convergence provided err_val is not empty
Expand Down
2 changes: 1 addition & 1 deletion search-4e.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -346,7 +346,7 @@
"outputs": [],
"source": [
"from search import *\n",
"sgb_words = DataFile(\"EN-text/sgb-words.txt\")"
"sgb_words = open_data(\"EN-text/sgb-words.txt\")"
]
},
{
Expand Down
4 changes: 2 additions & 2 deletions search.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@

from utils import (
is_in, argmin, argmax, argmax_random_tie, probability, weighted_sampler,
memoize, print_table, DataFile, Stack, FIFOQueue, PriorityQueue, name,
memoize, print_table, open_data, Stack, FIFOQueue, PriorityQueue, name,
distance
)

Expand Down Expand Up @@ -1044,7 +1044,7 @@ class BoggleFinder:

def __init__(self, board=None):
if BoggleFinder.wordlist is None:
BoggleFinder.wordlist = Wordlist(DataFile("EN-text/wordlist.txt"))
BoggleFinder.wordlist = Wordlist(open_data("EN-text/wordlist.txt"))
self.found = {}
if board:
self.set_board(board)
Expand Down
3 changes: 3 additions & 0 deletions tests/pytest.ini
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
[pytest]
filterwarnings =
ignore::ResourceWarning
8 changes: 6 additions & 2 deletions tests/test_learning.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import pytest
import math
import random
from utils import DataFile
from utils import open_data
from learning import *


Expand All @@ -18,27 +18,31 @@ def test_euclidean():
distance = euclidean_distance([0, 0, 0], [0, 0, 0])
assert distance == 0


def test_rms_error():
    """Check rms_error against hand-computed values for small sequences."""
    cases = [
        (([2, 2], [2, 2]), 0),
        (((0, 0), (0, 1)), math.sqrt(0.5)),
        (((1, 0), (0, 1)), 1),
        (((0, 0), (0, -1)), math.sqrt(0.5)),
        (((0, 0.5), (0, -0.5)), math.sqrt(0.5)),
    ]
    for (xs, ys), expected in cases:
        assert rms_error(xs, ys) == expected


def test_manhattan_distance():
    """Check manhattan_distance against hand-computed values."""
    cases = [
        (([2, 2], [2, 2]), 0),
        (([0, 0], [0, 1]), 1),
        (([1, 0], [0, 1]), 2),
        (([0, 0], [0, -1]), 1),
        (([0, 0.5], [0, -0.5]), 1),
    ]
    for (xs, ys), expected in cases:
        assert manhattan_distance(xs, ys) == expected


def test_mean_boolean_error():
    """Check mean_boolean_error on fully-wrong, half-wrong and equal inputs."""
    cases = [
        (([1, 1], [0, 0]), 1),
        (([0, 1], [1, 0]), 1),
        (([1, 1], [0, 1]), 0.5),
        (([0, 0], [0, 0]), 0),
        (([1, 1], [1, 1]), 0),
    ]
    for (xs, ys), expected in cases:
        assert mean_boolean_error(xs, ys) == expected


def test_mean_error():
assert mean_error([2, 2], [2, 2]) == 0
assert mean_error([0, 0], [0, 1]) == 0.5
Expand All @@ -53,7 +57,7 @@ def test_exclude():


def test_parse_csv():
    """parse_csv should turn the iris.csv text into typed rows."""
    # Read via a context manager so the file handle is closed promptly
    # instead of leaking (the suite otherwise needs ResourceWarning ignored).
    with open_data('iris.csv') as f:
        iris = f.read()
    assert parse_csv(iris)[0] == [5.1, 3.5, 1.4, 0.2, 'setosa']


Expand Down
18 changes: 9 additions & 9 deletions tests/test_text.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,11 +3,11 @@
import random

from text import *
from utils import isclose, DataFile
from utils import isclose, open_data


def test_text_models():
flatland = DataFile("EN-text/flatland.txt").read()
flatland = open_data("EN-text/flatland.txt").read()
wordseq = words(flatland)
P1 = UnigramTextModel(wordseq)
P2 = NgramTextModel(2, wordseq)
Expand Down Expand Up @@ -141,7 +141,7 @@ def test_char_models():


def test_viterbi_segmentation():
flatland = DataFile("EN-text/flatland.txt").read()
flatland = open_data("EN-text/flatland.txt").read()
wordseq = words(flatland)
P = UnigramTextModel(wordseq)
text = "itiseasytoreadwordswithoutspaces"
Expand All @@ -158,20 +158,20 @@ def test_shift_encoding():


def test_shift_decoding():
    """ShiftDecoder trained on the Flatland corpus decodes a Caesar cipher."""
    # Context manager closes the corpus file promptly instead of leaking
    # the handle (avoids the ResourceWarning the suite otherwise suppresses).
    with open_data("EN-text/flatland.txt") as f:
        flatland = f.read()
    ring = ShiftDecoder(flatland)
    msg = ring.decode('Kyzj zj r jvtivk dvjjrxv.')

    assert msg == 'This is a secret message.'


def test_permutation_decoder():
    """PermutationDecoder decodes a tiny cipher using two training corpora."""
    # Context managers close both corpus files promptly instead of leaking
    # the handles (avoids the ResourceWarning the suite otherwise suppresses).
    with open_data("EN-text/gutenberg.txt") as f:
        gutenberg = f.read()
    with open_data("EN-text/flatland.txt") as f:
        flatland = f.read()

    pd = PermutationDecoder(canonicalize(gutenberg))
    assert pd.decode('aba') in ('ece', 'ete', 'tat', 'tit', 'txt')

    pd = PermutationDecoder(canonicalize(flatland))
    assert pd.decode('aba') in ('ded', 'did', 'ece', 'ele', 'eme', 'ere', 'eve', 'eye', 'iti', 'mom', 'ses', 'tat', 'tit')

Expand All @@ -183,7 +183,7 @@ def test_rot13_encoding():


def test_rot13_decoding():
flatland = DataFile("EN-text/flatland.txt").read()
flatland = open_data("EN-text/flatland.txt").read()
ring = ShiftDecoder(flatland)
msg = ring.decode(rot13('Hello, world!'))

Expand Down
8 changes: 4 additions & 4 deletions text.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@
"outputs": [],
"source": [
"from text import *\n",
"from utils import DataFile"
"from utils import open_data"
]
},
{
Expand Down Expand Up @@ -84,7 +84,7 @@
}
],
"source": [
"flatland = DataFile(\"EN-text/flatland.txt\").read()\n",
"flatland = open_data(\"EN-text/flatland.txt\").read()\n",
"wordseq = words(flatland)\n",
"\n",
"P1 = UnigramTextModel(wordseq)\n",
Expand Down Expand Up @@ -186,7 +186,7 @@
}
],
"source": [
"flatland = DataFile(\"EN-text/flatland.txt\").read()\n",
"flatland = open_data(\"EN-text/flatland.txt\").read()\n",
"wordseq = words(flatland)\n",
"P = UnigramTextModel(wordseq)\n",
"text = \"itiseasytoreadwordswithoutspaces\"\n",
Expand Down Expand Up @@ -358,7 +358,7 @@
}
],
"source": [
"flatland = DataFile(\"EN-text/flatland.txt\").read()\n",
"flatland = open_data(\"EN-text/flatland.txt\").read()\n",
"decoder = ShiftDecoder(flatland)\n",
"\n",
"decoded_message = decoder.decode(ciphertext)\n",
Expand Down
11 changes: 2 additions & 9 deletions utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -383,20 +383,13 @@ def print_table(table, header=None, sep=' ', numfmt='{}'):
str(x), j)(size) for (j, size, x) in zip(justs, sizes, row)))


def AIMAFile(components, mode='r'):
"""Open a file based at the AIMA root directory."""
def open_data(name, mode='r'):
    """Open a file from the aima-data directory next to this module.

    name -- path of the data file relative to aima-data, e.g. 'iris.csv'
            or 'EN-text/flatland.txt'.
    mode -- mode string passed straight through to open() (default 'r').
    """
    aima_root = os.path.dirname(__file__)
    # Plain positional join is clearer than unpacking a throwaway list.
    aima_file = os.path.join(aima_root, 'aima-data', name)
    # Bug fix: mode was accepted but silently ignored, so callers asking
    # for 'rb' etc. got a text-mode handle. Pass it through to open().
    return open(aima_file, mode=mode)


def DataFile(name, mode='r'):
"Return a file in the AIMA /aima-data directory."
return AIMAFile(['aima-data', name], mode)


# ______________________________________________________________________________
# Expressions

Expand Down