Skip to content

Commit

Permalink
Add files via upload
Browse files Browse the repository at this point in the history
  • Loading branch information
Manikanta-Bhuvanesh authored Feb 8, 2022
1 parent 82ded4d commit da67de3
Show file tree
Hide file tree
Showing 65 changed files with 140,428 additions and 0 deletions.
65 changes: 65 additions & 0 deletions Lab1/Lab1.R
Original file line number Diff line number Diff line change
@@ -0,0 +1,65 @@
# Lab 1: arithmetic operators, workspace inspection, and console input.
1 + 2
2 * 3
6 / 3

# Use `<-` for assignment (R convention); `=` is reserved for call arguments.
a <- 1
b <- 4
a + b
a - b
a * b
a / b

# Attached packages/environments currently on the search path.
search()
# Read one line from the console. NOTE: readline() always returns a
# character string — convert with as.numeric() before doing arithmetic.
a <- readline("Enter a number")
# Objects in the global environment, the working directory, and its files.
ls()
getwd()
list.files()


# Atomic vectors of the basic types (assignment via `<-`, not `=`).
a <- c(1, 2, 3, 4, 5, 6)                   # double
a
b <- c(1 + 3i, 2 + 4i, 5 + 8i, 8 + 7i)     # complex
b
d <- c("T", "F", "F", "T", "F", "T", "T")  # character (quoted, NOT logical)
d
o <- c("a", "b", "c", "d")                 # character
o
# Mixing types in c() coerces everything to the most flexible common
# type — here character, so every element becomes a string.
f <- c(1, 3, 5, 3 + 4i, "T", "a")
f




# Concatenating two vectors: c(a, b) appends b's elements after a's.
a <- c(1, 5, 3, 87, 3, 8)
b <- c(4, 8, 6, 5, 9, 3, 9, 4)
a <- c(a, b)
a


# Aggregates over a vector containing NA: without na.rm = TRUE every
# result would itself be NA, because NA propagates through arithmetic.
a <- c(1, 4, 6, 7, 2, NA, 28, NA, NA, 7)
mean(a, na.rm = TRUE)
sum(a, na.rm = TRUE)
prod(a, na.rm = TRUE)


# Extremes of a numeric vector.
g <- c(23, 1, 3, 54, 56, 86, 23, 45, 67, 87)
max(g)
min(g)


# Second-largest element: sort descending, take position 2.
# Name the argument explicitly — sort(g, TRUE) matched `decreasing`
# only by position, which is fragile and hard to read.
sort(g, decreasing = TRUE)[2]

t <- c(2, 2, 4, 5, 7, 4, 8, 4)
unique(t)         # distinct values, in order of first appearance
t[duplicated(t)]  # the 2nd-and-later occurrences of repeated values


# Bind three vectors as the columns of a 3x3 matrix.
a <- c(1, 2, 3)
b <- c(4, 5, 6)
# Do not name a variable `c` — it shadows base::c(). The named cbind
# argument keeps the resulting column name "c" unchanged.
third <- c(7, 8, 9)
p <- cbind(a, b, c = third)
p

# Element-wise weighting: every value scaled by a constant weight.
a <- c(500, 465, 700, 478, 892, 446)
w <- (0.8 * 5) / 100  # = 0.04
p <- a * w
p

Binary file added Lab1/Lab1_19BCD7088.pdf
Binary file not shown.
Binary file added Lab1/lab1.docx
Binary file not shown.
Binary file added Lab10/Lab10_19BCD7088.pdf
Binary file not shown.
129 changes: 129 additions & 0 deletions Lab10/lab10.R
Original file line number Diff line number Diff line change
@@ -0,0 +1,129 @@
# K-means clustering of the four numeric iris measurements
# (column 5, the Species label, is dropped).
df <- iris[, -5]
# Fix the RNG so the k-means initialization (and thus cluster labels)
# is reproducible; nstart = 20 restarts and keeps the best solution.
set.seed(240)
kmeans.re <- kmeans(df, centers = 3, nstart = 20)
kmeans.re
kmeans.re$cluster
# Build up the scatter plot step by step: plain, colored by cluster,
# then with a title.
plot(df[c("Sepal.Length", "Sepal.Width")])
plot(df[c("Sepal.Length", "Sepal.Width")], col = kmeans.re$cluster)
plot(df[c("Sepal.Length", "Sepal.Width")], col = kmeans.re$cluster, main = "K-means with 3 clusters")
# Cluster centroids, then overlay them on the current plot as stars.
kmeans.re$centers
kmeans.re$centers[, c("Sepal.Length", "Sepal.Width")]
points(kmeans.re$centers[, c("Sepal.Length", "Sepal.Width")], col = 1:3, pch = 8, cex = 3)
y_kmeans <- kmeans.re$cluster
# NOTE(review): clusplot() comes from the `cluster` package; no
# library(cluster) call is visible in this file — confirm it is loaded.
clusplot(df[, c("Sepal.Length", "Sepal.Width")],y_kmeans,lines = 0,shade = TRUE,color = TRUE,labels = 2,
plotchar = FALSE,
span = TRUE,
main = paste("Cluster iris"),
xlab = 'Sepal.Length',
ylab = 'Sepal.Width')


# Logistic regression on the readingSkills data.
# NOTE(review): readingSkills ships with the `party` package and
# sample.split() with `caTools`; neither library() call is visible here.
df<-readingSkills[c(1:105), ]
# NOTE(review): sample.split() is documented to take the outcome VECTOR
# (e.g. df$nativeSpeaker), not a whole data frame — passing df here
# produces a column-count-based split pattern; verify this is intended.
split <- sample.split(df, SplitRatio = 0.8)
split
train_reg <- subset(df, split == "TRUE")
test_reg <- subset(df, split == "FALSE")
# Binomial GLM: probability of being a native speaker from age,
# shoe size, and test score.
logistic_model <- glm(nativeSpeaker ~ age + shoeSize + score, data = train_reg, family = "binomial")
summary(logistic_model)
# Predicted probabilities on the held-out rows, thresholded at 0.75.
predict_reg <- predict(logistic_model,test_reg, type = "response")
predict_reg
predict_reg <- ifelse(predict_reg >0.75, 1, 0)
# Confusion matrix and misclassification-based accuracy.
table(test_reg$nativeSpeaker, predict_reg)
missing_classerr <- mean(predict_reg != test_reg$nativeSpeaker)
print(paste('Accuracy =', 1 - missing_classerr))



# Render a conditional-inference tree for nativeSpeaker to a PNG file.
# NOTE(review): ctree() comes from the `party` (or `partykit`) package,
# and `df` is the readingSkills subset created above — both assumed here.
png(file = "decision_tree.png")
output.tree <- ctree(nativeSpeaker ~ age + shoeSize + score,data = df)
plot(output.tree)
dev.off()

# SVM classification of iris Species from sepal length and width only
# (columns 1, 2 plus the label in column 5).
# NOTE(review): svm() comes from the `e1071` package — confirm it is loaded.
df <- iris[, c(1, 2, 5)]

# Default SVM first, then one with the radial kernel and its
# hyper-parameters spelled out explicitly.
model <- svm(Species ~ ., data = df)
summary(model)
final_svm <- svm(Species ~ ., data = df, kernel = "radial", cost = 1, gamma = 1)
plot(final_svm, df)


# Naive Bayes on the hsb (High School and Beyond) data.
# NOTE(review): `hsb` is not a base dataset, createDataPartition() is
# from `caret`, and naiveBayes() is from `e1071` — none of the
# corresponding library() calls are visible in this file.
df<-hsb
# Fixed seed so the 70/30 stratified partition is reproducible.
set.seed(7267166)
trainIndex=createDataPartition(df$prog, p=0.7)$Resample1
train=df[trainIndex, ]
test=df[-trainIndex, ]
# Class distribution of the outcome before splitting.
print(table(df$prog))
# Predict program type from science and social-studies scores.
NBclassfier=naiveBayes(prog~science+socst, data=train)
print(NBclassfier)
# Evaluate a fitted classifier on the train/test split created above:
# prints a contingency table for each set plus train/test accuracy.
# NOTE: depends on the globals `train` and `test` defined in this script.
Print <- function(model) {
  trainPred <- predict(model, newdata = train, type = "class")
  trainTable <- table(train$prog, trainPred)
  # BUG FIX: test predictions previously used the global `NBclassfier`
  # instead of the `model` argument, so Print(newNBclassifier) silently
  # evaluated the WRONG model on the test set.
  testPred <- predict(model, newdata = test, type = "class")
  testTable <- table(test$prog, testPred)
  # Accuracy = trace / total; diag() generalizes the hard-coded
  # [1,1]+[2,2]+[3,3] sum to any number of classes.
  trainAcc <- sum(diag(trainTable)) / sum(trainTable)
  testAcc <- sum(diag(testTable)) / sum(testTable)
  message("Contingency Table for Training Data")
  print(trainTable)
  message("Contingency Table for Test Data")
  print(testTable)
  message("Accuracy")
  print(round(cbind(trainAccuracy = trainAcc, testAccuracy = testAcc), 3))
}
# Report train/test performance of the first Naive Bayes model.
Print(NBclassfier)
print(table(train$prog))
# Kernel-density Naive Bayes variant with an extra predictor (ses).
# NOTE(review): naive_bayes() is from the `naivebayes` package (no
# library() call visible); `usekernel=T` should ideally be TRUE, since
# T is a reassignable binding.
newNBclassifier=naive_bayes(prog~ses+science+socst,usekernel=T,data=train)
Print(newNBclassifier)

# k-NN (k = 3) self-classification of iris Species from the four
# numeric measurement columns.
# NOTE(review): knn() comes from the `class` package — confirm it is loaded.
df <- iris
# BUG FIX: the original `df <- df[-c(1,8)]` dropped Sepal.Length and
# shifted the columns, so df[, 1:4] pulled the Species factor into the
# feature matrix — class::knn requires an all-numeric feature matrix.
iris_tr_feat <- df[, 1:4]
# Seed fixes knn's random tie-breaking, keeping results reproducible.
set.seed(1)
# Train and query on the same rows (training-set accuracy only).
train_pred <- knn(iris_tr_feat, iris_tr_feat, df$Species, k = 3)
train_pred[1:10]
accuracy <- mean(train_pred == df$Species)
cat("Training Accuracy: ", accuracy, sep = '')

# "Logistic" regression on the wine-quality data.
# Keep only columns 1, 9, 11, 12 — presumably fixed.acidity, pH,
# alcohol, quality; verify against the winequality.csv header.
df <- read.csv('winequality.csv')
df <- df[,c(1,9,11,12)]
# NOTE(review): as above, sample.split() expects the outcome vector,
# not the whole data frame.
split <- sample.split(df, SplitRatio = 0.8)
split
train_reg <- subset(df, split == "TRUE")
test_reg <- subset(df, split == "FALSE")
# NOTE(review): no family argument, so this glm() fits a GAUSSIAN
# (ordinary linear) model despite the "logistic" name, and it is fitted
# on the FULL df rather than train_reg — the test rows were seen during
# training. Confirm whether binomial + train_reg was intended.
logistic_model <- glm( quality ~ fixed.acidity+pH + alcohol,data = df)
logistic_model
summary(logistic_model)
predict_reg <- predict(logistic_model,test_reg, type = "response")
predict_reg
# NOTE(review): quality is a multi-valued score, so thresholding the
# prediction into 0/1 and comparing against quality makes this
# "accuracy" figure essentially meaningless — review the intent.
predict_reg <- ifelse(predict_reg >0.5, 1, 0)
table(test_reg$quality, predict_reg)
missing_classerr <- mean(predict_reg != test_reg$quality)
print(paste('Accuracy =', 1 - missing_classerr))


# Render a conditional-inference tree for wine quality to a PNG file.
# NOTE(review): ctree() is from `party`/`partykit`; since quality is
# numeric this fits a regression tree, not a classification tree.
png(file = "decision_tree1.png")
output.tree <- ctree(quality ~ fixed.acidity + pH + alcohol,data = df)
plot(output.tree)
dev.off()

# SVM on the wine subset, predicting quality from all other columns.
# NOTE(review): svm() is from the `e1071` package — confirm it is loaded;
# with a numeric response it fits eps-regression by default.
model <- svm(quality ~ ., data = df)
summary(model)

# Same formula, but with the radial kernel and its hyper-parameters
# (cost, gamma) stated explicitly rather than left to defaults.
final_svm <- svm(quality ~ ., data = df, kernel = "radial", cost = 1, gamma = 1)
plot(final_svm, df)

# Naive Bayes on the wine subset with a 70/30 split.
# NOTE(review): sample.split() is from `caTools`, naiveBayes() from
# `e1071`; neither library() call is visible in this file.
split <- sample.split(df, SplitRatio = 0.7)
trainl <- subset(df, split == "TRUE")
testl <- subset(df, split == "FALSE")
# (Dead code removed: the original computed train_scale/test_scale with
# scale() but never used them — the model below is fitted on the
# unscaled data, which is unaffected by this cleanup.)
set.seed(120)
classifier_cl <- naiveBayes(quality ~ ., data = trainl)
classifier_cl
# Confusion matrix: actual vs predicted quality on the held-out rows.
y_pred <- predict(classifier_cl, newdata = testl)
cm <- table(testl$quality, y_pred)
cm

# k-NN (k = 3) self-classification of wine quality.
# BUG FIX: the original df[, 1:4] included the quality LABEL itself in
# the feature matrix, so the classifier was handed the answer. Use only
# the three predictor columns (presumably fixed.acidity, pH, alcohol —
# verify against the winequality.csv header).
df1 <- df[, 1:3]
# Seed fixes knn's random tie-breaking, keeping results reproducible.
set.seed(1)
train_pred <- knn(df1, df1, df$quality, k = 3)
train_pred[1:10]
accuracy <- mean(train_pred == df$quality)
cat("Training Accuracy: ", accuracy, sep = '')
Binary file added Lab10/lab10.docx
Binary file not shown.
Loading

0 comments on commit da67de3

Please sign in to comment.