-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathTest3.py
157 lines (157 loc) · 6.38 KB
/
Test3.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
import numpy as np
import cv2 as cv
import os
import scipy.optimize as opt
from numpy import loadtxt, where, zeros, e, array, log, ones, append, linspace
from pylab import scatter, show, legend, xlabel, ylabel, contour, title
import matplotlib.pyplot as plt
def im2double(im):
    """Normalize an image to float values in [0, 1].

    Parameters:
        im: array-like image of any numeric dtype (e.g. uint8 from cv.imread).
    Returns:
        float ndarray rescaled so the minimum pixel maps to 0.0 and the
        maximum to 1.0. A constant (flat) image returns all zeros instead
        of dividing by zero and producing NaNs.
    """
    flat = im.ravel()
    min_val = np.min(flat)
    max_val = np.max(flat)
    span = max_val - min_val
    if span == 0:
        # Flat image: the original (im - min) / (max - min) would be 0/0 -> NaN.
        return np.zeros_like(im, dtype=float)
    return (im.astype('float') - min_val) / span
def sigmoid(values):
    """Elementwise logistic function: 1 / (1 + exp(-values))."""
    exp_neg = np.exp(-values)
    return 1 / (1 + exp_neg)
def Cost_Function_regulariztion(theta, X, y, lambdas, m):
    """Regularized logistic-regression cost.

    Parameters:
        theta: parameter vector (theta[0] is the bias and is NOT penalized).
        X: design matrix, one example per row (bias column included).
        y: 0/1 label vector.
        lambdas: L2 regularization strength.
        m: number of training examples.
    Returns:
        0-d ndarray holding the scalar cost.
    """
    # The /30001 rescales the raw score (30000 pixel features + bias) so the
    # sigmoid argument stays in a moderate range; it must match the scaling
    # used in Gradient_for_f and in the prediction/accuracy functions.
    h = sigmoid(X.dot(theta) / 30001)
    # Clip predictions away from exactly 0 and 1 so np.log never produces
    # -inf / NaN when the sigmoid saturates (the original could return NaN).
    eps = 1e-15
    h = np.clip(h, eps, 1 - eps)
    thetaR = theta[1:]  # exclude bias from the penalty
    data_term = (1.0 / m) * ((-y.T.dot(np.log(h))) - ((1 - y).T.dot(np.log(1.0 - h))))
    reg_term = (lambdas / (2.0 * m)) * (thetaR.T.dot(thetaR))
    return np.array(data_term + reg_term)
def Gradient_for_f(theta, X, y, lambdas, m):
    """Gradient of the regularized logistic cost w.r.t. theta.

    The bias term theta[0] is excluded from the L2 penalty. Note the same
    /30001 score scaling as in Cost_Function_regulariztion.
    """
    predictions = sigmoid(X.dot(theta) / 30001)
    residuals = predictions - y
    grad = (1 / m) * (np.transpose(X).dot(residuals)) + (lambdas / m) * theta
    # Undo the penalty that the line above applied to the bias component.
    grad[0] = grad[0] - (lambdas / m) * theta[0]
    return np.array(grad)
def Batch_Gradient_Descent(theta, X, y, lambdas, m, learningrate):
    """One plain batch gradient-descent step: theta <- theta - lr * grad."""
    step = learningrate * Gradient_for_f(theta, X, y, lambdas, m)
    return theta - step
def Batch_Gradient_Descent_With_Momentum(theta, X, y, lambdas, m, learningrate, velocity_old):
    """One momentum gradient-descent step.

    v_new = 0.9 * v_old + lr * grad;  theta <- theta - v_new.
    Returns (updated_theta, v_new) so the caller can carry the velocity.
    """
    gamma = 0.9  # momentum coefficient
    grad = Gradient_for_f(theta, X, y, lambdas, m)
    velocity_new = gamma * velocity_old + learningrate * grad
    return theta - velocity_new, velocity_new
def error_calculating(theta, X, Y):
    """Misclassification rate of the logistic model on (X, Y).

    A row is predicted 1 when sigmoid(x.theta / 30001) >= 0.5, which holds
    exactly when x.theta / 30001 >= 0 — so we compare the raw score to 0
    and skip the sigmoid. This replaces the original O(n) Python loop with
    one vectorized pass.

    Parameters:
        theta: parameter vector.
        X: design matrix, one example per row.
        Y: 0/1 label vector.
    Returns:
        Fraction of rows whose prediction differs from Y.
    """
    scores = X.dot(theta) / 30001
    predictions = (scores >= 0).astype(int)  # threshold at sigmoid == 0.5
    mistakes = np.count_nonzero(predictions != Y)
    return mistakes / len(X)
def training_progress_Nomomentum(X, Y, m, initial_theta, max_iteration, learning_rates):
    """Run max_iteration plain gradient-descent steps with lambda = 0.

    Prints the current (unregularized) cost before each step and returns
    the final theta.
    """
    theta = initial_theta
    for _ in range(max_iteration):
        print("Current cost:", Cost_Function_regulariztion(theta, X, Y, 0, m))
        theta = Batch_Gradient_Descent(theta, X, Y, 0, m, learning_rates)
    return theta
def training_progress_Withmomentum(X, Y, m, initial_theta, max_iteration, learning_rates,
                                   XVAL, YVAL, XTEST, YTEST):
    """Momentum gradient descent (lambda = 0) with per-iteration accuracy tracking.

    Prints the cost before each step, then records train / validation / test
    accuracy after the step. Each history list starts with a 0 placeholder
    (so index i+1 corresponds to iteration i), matching the Iter list that
    starts at 0.

    Returns:
        (final_theta, acc_train_history, acc_validate_history,
         acc_test_history, iteration_indices)
    """
    theta = initial_theta
    velocity = np.zeros_like(initial_theta)
    acc_train_history = [0]
    acc_validate_history = [0]
    acc_test_history = [0]
    iteration_indices = [0]
    for step in range(max_iteration):
        print("Current cost:", Cost_Function_regulariztion(theta, X, Y, 0, m))
        theta, velocity = Batch_Gradient_Descent_With_Momentum(
            theta, X, Y, 0, m, learning_rates, velocity)
        acc_train_history.append(calculating_Accuracy(X, Y, m, theta, len(X)))
        acc_validate_history.append(calculating_Accuracy(XVAL, YVAL, m, theta, len(XVAL)))
        acc_test_history.append(calculating_Accuracy(XTEST, YTEST, m, theta, len(XTEST)))
        iteration_indices.append(step)
    return theta, acc_train_history, acc_validate_history, acc_test_history, iteration_indices
def calculating_Accuracy(X, Y, m, optimize_theta, num_images):
    """Classification accuracy of the logistic model on (X, Y).

    As in error_calculating, sigmoid(z) >= 0.5 exactly when z >= 0, so the
    raw score is compared against 0 in one vectorized pass instead of the
    original per-row Python loop.

    Parameters:
        X: design matrix, one example per row.
        Y: 0/1 label vector.
        m: unused; kept for signature compatibility with existing callers.
        optimize_theta: parameter vector.
        num_images: denominator for the accuracy (callers pass len(X)).
    Returns:
        correct_predictions / num_images.
    """
    scores = X.dot(optimize_theta) / 30001
    predictions = (scores >= 0).astype(int)  # threshold at sigmoid == 0.5
    correct = np.count_nonzero(predictions == Y)
    return correct / num_images
# ---------------------------------------------------------------------------
# Script: eye-state (open/closed) logistic-regression experiment.
# Loads every .jpg from a fixed directory, flattens each into a 30000-long
# pixel vector plus a leading bias feature of 1, splits by file order into
# train / validate / test sets, then trains 10 times from different random
# initial thetas and plots the final train/validate accuracy of each trial.
# ---------------------------------------------------------------------------
image_id = 0   # running count of .jpg files seen (renamed from `id`, which shadows a builtin)
imgpath = r'C:\Users\USER\PycharmProjects\untitled1\SC\AllData'
undetermined = 0  # NOTE(review): never used below

train_images = []      # feature vectors for ids 1..1399 (renamed from `input`, a builtin)
validate_images = []   # ids 1400..1899 (500 images)
test_images = []       # remaining images (~523 per the original comments)
train_labels = []      # 1 = filename ends 'open.jpg', else 0; parallel to train_images
validate_labels = []
test_labels = []

for image_name in os.listdir(imgpath):
    if not image_name.endswith('.jpg'):
        continue
    print(image_id)
    image_id += 1
    img = cv.imread(os.path.join(imgpath, image_name))
    img = im2double(img)            # scale pixels to [0, 1]
    img = np.reshape(img, 30000)    # 30000 raw features (assumes e.g. 100x100x3 -- TODO confirm)
    feature_vec = np.empty(30001)
    feature_vec[0] = 1.0            # bias feature
    feature_vec[1:] = img
    label = 1 if image_name.endswith('open.jpg') else 0
    # Split on the already-incremented id, so the first image carries id 1:
    # ids 1..1399 -> train (1399 images, one fewer than the "1400" comment
    # in the original), 1400..1899 -> validate, 1900+ -> test.
    if image_id < 1400:
        train_images.append(feature_vec)
        train_labels.append(label)
    elif image_id < 1900:
        validate_images.append(feature_vec)
        validate_labels.append(label)
    else:
        test_images.append(feature_vec)
        test_labels.append(label)

train_images = np.array(train_images)
validate_images = np.array(validate_images)
test_images = np.array(test_images)
train_labels = np.array(train_labels)
validate_labels = np.array(validate_labels)
test_labels = np.array(test_labels)

m = train_labels.shape[0]            # number of training examples
mylambda = 3.0                       # NOTE(review): training below runs with lambda = 0
max1 = 10                            # NOTE(review): never used below
n_features = train_images.shape[1]   # 30000 pixels + 1 bias (was len(EachImage))
learning_rates = 100                 # large rate pairs with the /30001 score scaling in the model
maxiteration = 1200
count = 0                            # NOTE(review): never used below

print(len(validate_images))
print(len(test_images))
print(len(train_images))

error_train = []      # NOTE(review): leftovers from a disabled learning-curve experiment
error_validate = []

# Sensitivity to initialization: train 10 times from fresh random thetas and
# record each run's final training / validation accuracy.
trial_ids = []
final_train_accuracy = []
final_validate_accuracy = []
for trial in range(10):
    initial_theta = np.random.randn(n_features)
    trial_ids.append(trial)
    (optimize_theta, acc_train, acc_validate,
     acc_test, iterations) = training_progress_Withmomentum(
        train_images, train_labels, m, initial_theta, maxiteration,
        learning_rates, validate_images, validate_labels,
        test_images, test_labels)
    final_train_accuracy.append(acc_train[-1])
    final_validate_accuracy.append(acc_validate[-1])

# NOTE(review): the x-axis is the trial index, although the label says "Iteration".
plt.xlabel("Iteration")
plt.ylabel("Accuracy")
plt.plot(trial_ids, final_train_accuracy, label="Training Accuracy")
plt.plot(trial_ids, final_validate_accuracy, label="Validate Accuracy")
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, mode="expand", borderaxespad=0.)
plt.show()