Skip to content

Commit e426ffd

Browse files
committed
add SVM SMO and QP
1 parent 131ffda commit e426ffd

18 files changed

Lines changed: 952 additions & 1 deletion

.gitignore

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,22 @@
1+
## Core latex/pdflatex auxiliary files:
2+
*.aux
3+
*.lof
4+
*.log
5+
*.lot
6+
*.fls
7+
*.out
8+
*.toc
9+
*.fmt
10+
*.fot
11+
*.cb
12+
*.cb2
13+
14+
15+
## MAC
16+
.DS_Store
17+
18+
## PyCharm
19+
*.idea/*.iml
20+
21+
# auxiliary python files
22+
*.pyc

README.md

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,11 @@ MachineLearning
4848

4949
[libsvm liblinear-usage](https://github.com/wepe/MachineLearning/tree/master/SVM/libsvm%20liblinear-usage) 对使用广泛的libsvm、liblinear的使用方法进行了总结,详细介绍:[文章链接](http://blog.csdn.net/u012162613/article/details/45206813)
5050

51+
[SVM by SMO](./SVM/SVM_by_SMO) - 用SMO实现了SVM
52+
53+
[SVM by QP](./SVM/SVM_by_QP) - 用二次编程(QP)实现了SVM
54+
55+
5156
- **GMM**
5257

5358
GMM和k-means作为EM算法的应用,在某种程度有些相似之处,不过GMM明显学习出一些概率密度函数来,结合相关理解写成python版本,详细介绍:[文章链接](http://blog.csdn.net/gugugujiawei/article/details/45583051)
@@ -72,4 +77,4 @@ MachineLearning
7277

7378
- [wepon](https://github.com/wepe)
7479
- [Gogary](https://github.com/enjoyhot)
75-
80+
- [Locky](https://github.com/junlulocky)

SVM/SVM_by_QP/README.md

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
# SVM Python
2+
3+
This is an example SVM implementation in Python, based on cvxopt.
4+
5+
For the quadratic programming solver, please refer to http://cvxopt.org/userguide/coneprog.html?highlight=qp#cvxopt.solvers.qp
6+
7+
For the theory of SVM, please refer to the four SVM slides included in this project.

SVM/SVM_by_QP/SVCQP.py

Lines changed: 100 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,100 @@
1+
import numpy as np
2+
from numpy import linalg
3+
import cvxopt
4+
import cvxopt.solvers
5+
6+
## define kernel functions
7+
def linear_kernel(x1, x2):
    """Plain dot-product kernel: k(x1, x2) = <x1, x2>."""
    product = np.dot(x1, x2)
    return product
9+
10+
def polynomial_kernel(x, y, p=3):
    """Polynomial kernel (1 + <x, y>)^p of degree p (default 3)."""
    base = np.dot(x, y) + 1
    return base ** p
12+
13+
def gaussian_kernel(x, y, sigma=5.0):
    """RBF kernel: exp(-||x - y||^2 / (2 * sigma^2))."""
    sq_dist = linalg.norm(x - y) ** 2
    return np.exp(-sq_dist / (2 * (sigma ** 2)))
15+
## end define kernel functions
16+
17+
class SVM(object):
    """
    Support vector classification by quadratic programming.

    The SVM dual problem is posed as a QP and solved with
    cvxopt.solvers.qp; the fitted model keeps the support vectors,
    their Lagrange multipliers, the intercept and, for the linear
    kernel only, an explicit primal weight vector.
    """

    def __init__(self, kernel=linear_kernel, C=None):
        """
        :param kernel: kernel function, should be one of the kernel
                       functions defined above in this module
        :param C: soft-margin penalty; None means a hard-margin SVM
        """
        self.kernel = kernel
        self.C = C
        if self.C is not None: self.C = float(self.C)

    def fit(self, X, y):
        """
        Solve the dual QP for training data X of shape
        (n_samples, n_features) and labels y in {-1.0, +1.0}.
        Sets self.a, self.sv, self.sv_y, self.b and self.w.
        """
        n_samples, n_features = X.shape

        # Gram matrix of pairwise kernel values.
        K = np.zeros((n_samples, n_samples))
        for i in range(n_samples):
            for j in range(n_samples):
                K[i,j] = self.kernel(X[i], X[j])

        # Dual objective: minimize (1/2) a^T P a + q^T a
        # subject to the equality constraint y^T a = 0 (A, b below).
        P = cvxopt.matrix(np.outer(y,y) * K)
        q = cvxopt.matrix(np.ones(n_samples) * -1)
        A = cvxopt.matrix(y, (1,n_samples))
        b = cvxopt.matrix(0.0)

        if self.C is None:
            # Hard margin: a_i >= 0 (expressed as -a_i <= 0).
            G = cvxopt.matrix(np.diag(np.ones(n_samples) * -1))
            h = cvxopt.matrix(np.zeros(n_samples))
        else:
            # Soft margin: 0 <= a_i <= C, stacked as two inequality blocks.
            tmp1 = np.diag(np.ones(n_samples) * -1)
            tmp2 = np.identity(n_samples)
            G = cvxopt.matrix(np.vstack((tmp1, tmp2)))
            tmp1 = np.zeros(n_samples)
            tmp2 = np.ones(n_samples) * self.C
            h = cvxopt.matrix(np.hstack((tmp1, tmp2)))

        # solve QP problem, DOC: http://cvxopt.org/userguide/coneprog.html?highlight=qp#cvxopt.solvers.qp
        solution = cvxopt.solvers.qp(P, q, G, h, A, b)

        # Lagrange multipliers
        a = np.ravel(solution['x'])

        # Support vectors have non zero lagrange multipliers
        # (numerically: above a small tolerance).
        sv = a > 1e-5
        ind = np.arange(len(a))[sv]
        self.a = a[sv]
        self.sv = X[sv]
        self.sv_y = y[sv]
        # Fixed: Python 2 print statement -> portable print() call.
        print("%d support vectors out of %d points" % (len(self.a), n_samples))

        # Intercept, averaged over all support vectors.
        self.b = 0
        for n in range(len(self.a)):
            self.b += self.sv_y[n]
            self.b -= np.sum(self.a * self.sv_y * K[ind[n],sv])
        self.b /= len(self.a)

        # Weight vector (only recoverable explicitly for the linear kernel).
        if self.kernel == linear_kernel:
            self.w = np.zeros(n_features)
            for n in range(len(self.a)):
                self.w += self.a[n] * self.sv_y[n] * self.sv[n]
        else:
            self.w = None

    def project(self, X):
        """Decision values f(X) before sign thresholding."""
        if self.w is not None:
            return np.dot(X, self.w) + self.b
        else:
            # Kernelized decision function: weighted sum over support vectors.
            y_predict = np.zeros(len(X))
            for i in range(len(X)):
                s = 0
                for a, sv_y, sv in zip(self.a, self.sv_y, self.sv):
                    s += a * sv_y * self.kernel(X[i], sv)
                y_predict[i] = s
            return y_predict + self.b

    def predict(self, X):
        """Predicted class labels: sign of the decision value (0.0 on the boundary)."""
        return np.sign(self.project(X))
100+

SVM/SVM_by_QP/doc/.DS_Store

6 KB
Binary file not shown.
793 KB
Binary file not shown.
1.96 MB
Binary file not shown.
2.65 MB
Binary file not shown.
1.99 MB
Binary file not shown.
119 KB
Binary file not shown.

0 commit comments

Comments
 (0)