diff --git a/deep_learning4e.py b/deep_learning4e.py
index bea9c8d2c..64aa49e90 100644
--- a/deep_learning4e.py
+++ b/deep_learning4e.py
@@ -8,7 +8,7 @@
 from keras.layers import Embedding, SimpleRNN, Dense
 from keras.preprocessing import sequence
 
-from utils4e import (sigmoid, dot_product, softmax1D, conv1D, gaussian_kernel, element_wise_product, vector_add,
+from utils4e import (Sigmoid, dot_product, softmax1D, conv1D, gaussian_kernel, element_wise_product, vector_add,
                      random_weights, scalar_vector_product, matrix_multiplication, map_vector, mse_loss)
 
 
@@ -37,7 +37,7 @@ class NNUnit(Node):
     """
 
     def __init__(self, weights=None, value=None):
-        super(NNUnit, self).__init__(value)
+        super().__init__(value)
         self.weights = weights or []
 
 
@@ -59,7 +59,7 @@ class OutputLayer(Layer):
     """1D softmax output layer in 19.3.2"""
 
     def __init__(self, size=3):
-        super(OutputLayer, self).__init__(size)
+        super().__init__(size)
 
     def forward(self, inputs):
         assert len(self.nodes) == len(inputs)
@@ -73,7 +73,7 @@ class InputLayer(Layer):
     """1D input layer. Layer size is the same as input vector size."""
 
     def __init__(self, size=3):
-        super(InputLayer, self).__init__(size)
+        super().__init__(size)
 
     def forward(self, inputs):
         """Take each value of the inputs to each unit in the layer."""
@@ -92,10 +92,10 @@ class DenseLayer(Layer):
     """
 
     def __init__(self, in_size=3, out_size=3, activation=None):
-        super(DenseLayer, self).__init__(out_size)
+        super().__init__(out_size)
         self.out_size = out_size
         self.inputs = None
-        self.activation = sigmoid() if not activation else activation
+        self.activation = Sigmoid() if not activation else activation
         # initialize weights
         for node in self.nodes:
             node.weights = random_weights(-0.5, 0.5, in_size)
@@ -118,7 +118,7 @@ class ConvLayer1D(Layer):
     """
 
     def __init__(self, size=3, kernel_size=3):
-        super(ConvLayer1D, self).__init__(size)
+        super().__init__(size)
         # init convolution kernel as gaussian kernel
         for node in self.nodes:
             node.weights = gaussian_kernel(kernel_size)
@@ -142,7 +142,7 @@ class MaxPoolingLayer1D(Layer):
     """
 
     def __init__(self, size=3, kernel_size=3):
-        super(MaxPoolingLayer1D, self).__init__(size)
+        super().__init__(size)
         self.kernel_size = kernel_size
         self.inputs = None
 
@@ -326,7 +326,7 @@ class BatchNormalizationLayer(Layer):
     """Batch normalization layer."""
 
     def __init__(self, size, epsilon=0.001):
-        super(BatchNormalizationLayer, self).__init__(size)
+        super().__init__(size)
         self.epsilon = epsilon
         # self.weights = [beta, gamma]
         self.weights = [0, 0]
diff --git a/learning.py b/learning.py
index bcaf0961e..99ef8abc2 100644
--- a/learning.py
+++ b/learning.py
@@ -265,9 +265,9 @@
     while True:
         errT, errV = cross_validation(learner, dataset, size, k, trials)
         # check for convergence provided err_val is not empty
-        if errT and not np.isclose(errT[-1], errT, rel_tol=1e-6):
+        if errT and not np.isclose(errT[-1], errT, rtol=1e-6):
             best_size = 0
-            min_val = inf
+            min_val = np.inf
             i = 0
             while i < size:
                 if errs[i] < min_val:
diff --git a/learning4e.py b/learning4e.py
index 01d9ea290..f581b9ec1 100644
--- a/learning4e.py
+++ b/learning4e.py
@@ -7,7 +7,6 @@
 from qpsolvers import solve_qp
 
 from probabilistic_learning import NaiveBayesLearner
-from utils import sigmoid, sigmoid_derivative
 from utils4e import *
 
 
@@ -265,9 +264,9 @@ def model_selection(learner, dataset, k=10, trials=1):
     while True:
         err = cross_validation(learner, dataset, size, k, trials)
         # check for convergence provided err_val is not empty
-        if err and not isclose(err[-1], err, rel_tol=1e-6):
+        if err and not np.isclose(err[-1], err, rtol=1e-6):
             best_size = 0
-            min_val = inf
+            min_val = np.inf
             i = 0
             while i < size:
                 if errs[i] < min_val:
@@ -569,8 +568,8 @@ def LogisticLinearLeaner(dataset, learning_rate=0.01, epochs=100):
         # pass over all examples
         for example in examples:
             x = [1] + example
-            y = sigmoid(dot_product(w, x))
-            h.append(sigmoid_derivative(y))
+            y = Sigmoid().f(dot_product(w, x))
+            h.append(Sigmoid().derivative(y))
             t = example[idx_t]
             err.append(t - y)
 
@@ -581,7 +580,7 @@
 
     def predict(example):
         x = [1] + example
-        return sigmoid(dot_product(w, x))
+        return Sigmoid().f(dot_product(w, x))
 
     return predict
diff --git a/tests/test_deep_learning4e.py b/tests/test_deep_learning4e.py
index 92d73e96e..ed8979a0a 100644
--- a/tests/test_deep_learning4e.py
+++ b/tests/test_deep_learning4e.py
@@ -1,4 +1,3 @@
-import numpy as np
 import pytest
 
 from keras.datasets import imdb
diff --git a/utils.py b/utils.py
index 1d7f1e4f5..4bf29a9a3 100644
--- a/utils.py
+++ b/utils.py
@@ -273,11 +273,6 @@ def normalize(dist):
     return [(n / total) for n in dist]
 
 
-def norm(x, ord=2):
-    """Return the n-norm of vector x."""
-    return np.linalg.norm(x, ord)
-
-
 def random_weights(min_value, max_value, num_weights):
     return [random.uniform(min_value, max_value) for _ in range(num_weights)]
 
diff --git a/utils4e.py b/utils4e.py
index 6ed4a7f79..1c376066e 100644
--- a/utils4e.py
+++ b/utils4e.py
@@ -92,6 +92,10 @@ def remove_all(item, seq):
     """Return a copy of seq (or string) with all occurrences of item removed."""
     if isinstance(seq, str):
         return seq.replace(item, '')
+    elif isinstance(seq, set):
+        rest = seq.copy()
+        rest.remove(item)
+        return rest
     else:
         return [x for x in seq if x != item]
 
@@ -368,11 +372,6 @@ def normalize(dist):
     return [(n / total) for n in dist]
 
 
-def norm(x, ord=2):
-    """Return the n-norm of vector x."""
-    return np.linalg.norm(x, ord)
-
-
 def random_weights(min_value, max_value, num_weights):
     return [random.uniform(min_value, max_value) for _ in range(num_weights)]
 
@@ -402,7 +401,10 @@ def gaussian_kernel_2D(size=3, sigma=0.5):
 
 class Activation:
 
-    def derivative(self, value):
+    def f(self, x):
+        pass
+
+    def derivative(self, x):
         pass
 
 
@@ -418,7 +420,7 @@ def softmax1D(x):
     return [exp / sum_exps for exp in exps]
 
 
-class sigmoid(Activation):
+class Sigmoid(Activation):
 
     def f(self, x):
         if x >= 100:
@@ -431,7 +433,7 @@ def derivative(self, value):
         return value * (1 - value)
 
 
-class relu(Activation):
+class Relu(Activation):
 
     def f(self, x):
         return max(0, x)
@@ -440,7 +442,7 @@ def derivative(self, value):
         return 1 if value > 0 else 0
 
 
-class elu(Activation):
+class Elu(Activation):
 
     def f(self, x, alpha=0.01):
         return x if x > 0 else alpha * (np.exp(x) - 1)
@@ -449,7 +451,7 @@ def derivative(self, value, alpha=0.01):
         return 1 if value > 0 else alpha * np.exp(value)
 
 
-class tanh(Activation):
+class Tanh(Activation):
 
     def f(self, x):
         return np.tanh(x)
@@ -458,7 +460,7 @@ def derivative(self, value):
         return 1 - (value ** 2)
 
 
-class leaky_relu(Activation):
+class LeakyRelu(Activation):
 
     def f(self, x, alpha=0.01):
         return x if x > 0 else alpha * x
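
Note on the sigmoid -> Sigmoid rename: the activations in utils4e.py are now
PascalCase classes implementing the two-method Activation interface (f for the
forward value, derivative for the gradient), so call sites such as DenseLayer
and LogisticLinearLeaner construct an instance and call its methods instead of
calling a bare function. A minimal standalone sketch of the new call style,
for illustration only (the short-circuit for large x shown in the hunk,
"if x >= 100:", is elided here):

    import numpy as np

    class Activation:
        def f(self, x):
            pass

        def derivative(self, x):
            pass

    class Sigmoid(Activation):
        def f(self, x):
            # plain logistic; the utils4e.py version in the hunk above
            # also short-circuits for large x (if x >= 100)
            return 1 / (1 + np.exp(-x))

        def derivative(self, value):
            # written in terms of the *output* value, which is why
            # LogisticLinearLeaner passes y rather than dot_product(w, x)
            return value * (1 - value)

    act = Sigmoid()
    y = act.f(0.5)          # was: sigmoid(dot_product(w, x)); y ~= 0.6225
    dy = act.derivative(y)  # was: sigmoid_derivative(y);      dy ~= 0.2350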
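
Note on the rel_tol -> rtol changes in learning.py and learning4e.py:
math.isclose takes a rel_tol keyword, but numpy.isclose takes rtol/atol, so
passing rel_tol to np.isclose raises a TypeError. A small sketch of the two
signatures (values chosen arbitrarily for illustration):

    import math
    import numpy as np

    math.isclose(1.0, 1.0 + 5e-7, rel_tol=1e-6)    # True
    np.isclose(1.0, 1.0 + 5e-7, rtol=1e-6)         # True
    # np.isclose(1.0, 1.0 + 5e-7, rel_tol=1e-6)    # TypeError: unexpected keyword

    # unlike math.isclose, np.isclose also broadcasts over sequences and
    # returns an array, as in the np.isclose(err[-1], err, rtol=1e-6) call above
    np.isclose(0.5, [0.5, 0.25], rtol=1e-6)        # array([ True, False])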