-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path01_start.py
More file actions
162 lines (125 loc) · 6.05 KB
/
01_start.py
File metadata and controls
162 lines (125 loc) · 6.05 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
# coding=utf-8
# This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
#
# This script trains multinomial Naive Bayes on the tweet corpus
# to find two different results:
# - How well can we distinguish positive from negative tweets?
# - How well can we detect whether a tweet contains sentiment at all?
#
# Record wall-clock start; total runtime is printed at the end of __main__.
import time
start_time = time.time()
import numpy as np
from sklearn.metrics import precision_recall_curve, roc_curve, auc
# NOTE(review): sklearn.cross_validation was removed in scikit-learn 0.20;
# newer environments provide ShuffleSplit in sklearn.model_selection instead.
from sklearn.cross_validation import ShuffleSplit
# Project-local helpers: P/R plotting, corpus loading, label binarization.
from utils import plot_pr
from utils import load_sanders_data
from utils import tweak_labels
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.naive_bayes import MultinomialNB
def create_ngram_model():
    """Build the text-classification pipeline used by every experiment.

    Returns:
        sklearn.pipeline.Pipeline with two steps:
        'vect' -- TfidfVectorizer over word 1- to 3-grams, and
        'clf'  -- MultinomialNB.
    """
    # binary=False keeps real TF-IDF weights rather than 0/1 occurrence flags.
    tfidf_ngrams = TfidfVectorizer(ngram_range=(1, 3),
                                   analyzer="word", binary=False)
    clf = MultinomialNB()
    return Pipeline([('vect', tfidf_ngrams), ('clf', clf)])
def train_model(clf_factory, X, Y, name="NB ngram", plot=False):
    """Train and evaluate a classifier over 10 random train/test splits.

    Args:
        clf_factory: zero-argument callable returning a fresh, unfitted
            classifier/pipeline for each split (e.g. create_ngram_model).
        X: array of raw tweet texts.
        Y: array of 0/1 labels aligned with X.
        name: label used in console output and in the P/R plot.
        plot: if True, plot the precision/recall curve of the split whose
            P/R AUC is the median across all splits.

    Returns:
        (mean train error, mean test error) over the 10 splits.
    """
    # ShuffleSplit only yields index arrays; the data itself is sliced below.
    # (Old pre-0.18 sklearn API: n= / n_iter=, iterated directly.)
    cv = ShuffleSplit(
        n=len(X), n_iter=10, test_size=0.3, random_state=0)

    train_errors = []
    test_errors = []
    scores = []
    pr_scores = []
    precisions, recalls, thresholds = [], [], []

    for train, test in cv:
        X_train, y_train = X[train], Y[train]
        X_test, y_test = X[test], Y[test]

        clf = clf_factory()
        clf.fit(X_train, y_train)

        train_score = clf.score(X_train, y_train)
        test_score = clf.score(X_test, y_test)

        train_errors.append(1 - train_score)
        test_errors.append(1 - test_score)
        scores.append(test_score)

        # Per-class probabilities; column 1 is P(label == 1).
        proba = clf.predict_proba(X_test)

        # Precision/recall at every probability threshold; its AUC is the
        # per-split quality score (0.5 is chance-like, 1.0 is perfect).
        precision, recall, pr_thresholds = precision_recall_curve(
            y_test, proba[:, 1])

        # Debug dump: probabilities side by side with the raw tweets and
        # their true labels, for the first 20 test samples.
        plot_result = np.c_[proba, X_test, y_test]
        print("result for - {0}".format(name))
        print(plot_result[:20])

        pr_scores.append(auc(recall, precision))
        precisions.append(precision)
        recalls.append(recall)
        thresholds.append(pr_thresholds)

    # Pick the split whose P/R AUC is the median of all splits.
    # BUG FIX: use integer division -- "/ 2" yields a float on Python 3,
    # which raises TypeError when used as an index.
    scores_to_sort = pr_scores
    median = np.argsort(scores_to_sort)[len(scores_to_sort) // 2]

    if plot:
        plot_pr(pr_scores[median], name, "01", precisions[median],
                recalls[median], label=name)

    summary = (np.mean(scores), np.std(scores),
               np.mean(pr_scores), np.std(pr_scores))
    print("%.3f\t%.3f\t%.3f\t%.3f\t" % summary)

    return np.mean(train_errors), np.mean(test_errors)
def print_incorrect(clf, X, Y):
    """Print every sample the (already fitted) classifier misclassifies.

    Args:
        clf: fitted classifier exposing predict().
        X: array of samples (tweet texts).
        Y: array of true labels aligned with X.
    """
    predictions = clf.predict(X)
    # Boolean mask selecting only the misclassified samples.
    mismatch = predictions != Y
    for sample, predicted, expected in zip(
            X[mismatch], predictions[mismatch], Y[mismatch]):
        print("clf.predict('%s')=%i instead of %i" %
              (sample, predicted, expected))
if __name__ == "__main__":
    # load_sanders_data returns the tweet texts and their sentiment labels.
    X_orig, Y_orig = load_sanders_data()

    # Report how many tweets each label has.
    for label in np.unique(Y_orig):
        print("#%s: %i" % (label, sum(Y_orig == label)))

    print("== Pos vs. neg ==")
    # First experiment: keep only tweets labelled positive or negative,
    # then binarize (1 = positive, 0 = negative).
    keep = np.logical_or(Y_orig == "positive", Y_orig == "negative")
    train_model(create_ngram_model,
                X_orig[keep],
                tweak_labels(Y_orig[keep], ["positive"]),
                name="pos vs neg", plot=True)

    # The remaining experiments run on the full corpus: the listed labels
    # are mapped to 1 and every other label to 0 by tweak_labels.
    experiments = [
        ("== Pos/neg vs. irrelevant/neutral ==",
         ["positive", "negative"], "sent vs rest"),
        ("== Pos vs. rest ==", ["positive"], "pos vs rest"),
        ("== Neg vs. rest ==", ["negative"], "neg vs rest"),
    ]
    for heading, wanted_labels, run_name in experiments:
        print(heading)
        train_model(create_ngram_model,
                    X_orig,
                    tweak_labels(Y_orig, wanted_labels),
                    name=run_name, plot=True)

    print("time spent:", time.time() - start_time)