#---------------------------------------------------------------------------
#
# Classification problems
#
#---------------------------------------------------------------------------
# install.packages("tm"     , dependencies=T)
# install.packages("gmodels", dependencies=T)   # CrossTable
# install.packages("AUC"    , dependencies=T)
# install.packages("tree"   , dependencies=T)
# install.packages("glmnet" , dependencies=T)   # multinomial, lasso
# install.packages("gbm"    , dependencies=T)   # boosting

require(tm)
require(stringr)
require(gmodels)
require(AUC)
require(tree)

setwd("~/courses/mich/text_analytics/")

source("R_scripts/text_utils.R")
source("R_scripts/missing_data.R")

load("R_scripts/Wine.Rdata")    ; dim(Wine)
load("R_scripts/oovFreq.Rdata") ; dim(oovFreq)    # columns in order of frequency

#---------------------------------------------------------------------------
#
# Predicting wine color
#
#---------------------------------------------------------------------------

# --- build unified data frame for modeling

typeof(oovFreq)                          # big matrix of counts
dim(oovFreq)
oovFreq[1:5,1:5]
(typeFreq <- colSums(oovFreq))[1:10]
docLengths <- rowSums(oovFreq)

# no longer needed here (shown in slides); the "w_" prefix avoids a conflict with 'color'
colnames(oovFreq) <- paste("w_",colnames(oovFreq),sep="")

wordProportions <- oovFreq / docLengths  # or divide by sqrt length (Poisson)
rowSums(wordProportions[1:4,])           # each row sums to 1
dim(wordProportions)                     # 20508 2645

# fill missing data before joining to the text or including the response
hasColor <- !is.na(Wine[,"color"])
Data <- data.frame(Wine[hasColor,c("alcohol","vintage","price","points")])
apply(Data,2,function(x) sum(is.na(x)))  # all have missing values
Data <- fill.missing(Data)
dim(Data)                                # 17336 x 8
apply(Data,2,function(x) sum(is.na(x)))  # none missing now (missingness kept in the Miss.* indicators)

Data <- data.frame(color=Wine[hasColor,"color"], Data,
                   docLengths = docLengths[hasColor],
                   wordProportions[hasColor,])
dim(Data)                                # 17336 2655

# keep color as a factor; glm(binomial) then models Pr(second level) = Pr(White),
# and the later code that compares color to "Red"/"White" still works
Data$color <- factor(Data$color, levels=c("Red","White"))
dim(Data)
colnames(Data)[c(1:12)]
round(Data[1:10,10:20],2)                # length, then word proportions

# --- Split data into test and train sets (make it harder by saving a lot for testing!)
set.seed(232)
test  <- sample(1:nrow(Data), 10000)
train <- (1:nrow(Data))[-test]

# --- Logistic regression classifies color very well
#     Build the regression data as in OLS to avoid the formula-length constraint
#     Length again very useful

nWords <- 40                             # 20 then 40 then ...
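# (aside, not in the original) The "formula-length constraint" mentioned above refers to the
# alternative of pasting every word name into one long formula, as the variety section does later.
# A minimal sketch of that alternative, assuming the word columns start at column 11 of Data:
wordTerms <- paste(colnames(Data)[11:(10+nWords)], collapse=" + ")
longForm  <- as.formula(paste("color ~ alcohol + vintage + price + points +", wordTerms))
# subsetting columns and fitting "color ~ ." (below) gives the same kind of model without the long string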
# just the "non-word" variables RegrData <- Data[,1:9] summary(lr <- glm("color ~ .", data=RegrData[train,], family=binomial(logit))) # just the words (with length) RegrData <- Data[,c(1,10:(10 + nWords))] summary(lr <- glm("color ~ .", data=RegrData[train,], family=binomial(logit))) # unified RegrData <- Data[,1:(10 + nWords)] summary(lr <- glm("color ~ .", data=RegrData[train,], family=binomial(logit))) # --- Predict color in test (type implies we want to predict y, not the log odds) pred <- predict(lr, newdata=RegrData[test,], type="response") round(pred[1:10],3) tab <- CrossTable(RegrData[test,"color"], pred>0.5, prop.t=F, prop.chisq=F) # precision, recall confusion_summary(RegrData[test,"color"]=="White", pred>0.5) # inputs must be logical (T/F) # --- calibration y <- ifelse(Data[test,"color"]=="White",1,0) plot(pred, y, col='gray', xlab="Estimated Pr(White)", ylab="True Color") abline(a=0,b=1,col='gray') # reference line o <- order(pred) x <- pred[o] y <- y[o] r <- loess(y ~ x, span=0.50) # vary span lines(x,fitted(r),col='red', lwd=3) r <- loess(y ~ x, span=0.20) # more wiggles lines(x,fitted(r),col='red', lwd=2) r <- loess(y ~ x, span=0.10) lines(x,fitted(r),col='red', lwd=1) i <- which(0.80.5) confusion_summary(Data[test,"color"]=="White", pred>0.4) confusion_summary(Data[test,"color"]=="White", pred>0.2) confusion_summary(Data[test,"color"]=="White", pred>0.1) ?AUC tmp <- roc(pred, Data[test,"color"]) auc(tmp) plot(tmp) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # --- Lasso for feature selection in red/white example require(glmnet) Y <- ifelse(Data[,"color"]=="White",1,0) # glmnet uses explict X, Y, not formula # first model with 40 words as in logistic (then 200) nWords <- 40 X <- as.matrix(Data[,2:(10+nWords)]) dim(X) # 17336 x 49 (9 + nWords) lasso.regr <- glmnet(X[train,], Y[train], family='binomial', alpha=1) # alpha=1 means lasso plot(lasso.regr) # inspect coefficients names(lasso.regr) predict(lasso.regr, s=0, exact=T,type="coefficients") # coefficients, s = lambda predict(lasso.regr,s=.001,exact=T,type="coefficients") # some zeroed predict(lasso.regr,s=.010,exact=T,type="coefficients") # predict(lasso.regr,s=.100,exact=T,type="coefficients") # most zeroed predict(lasso.regr,s=.220,exact=T,type="coefficients") # only tannin present predict(lasso.regr,s=.200,exact=T,type="coefficients") # then apple predict(lasso.regr,s=.175,exact=T,type="coefficients") # then cherry # use 10 fold CV to pick tuning parameter lambda set.seed(6) cv.out<-cv.glmnet(X[train,], Y[train], family='binomial', alpha=1) plot(cv.out) (bestlam<-cv.out$lambda.min) (bestlam<-cv.out$lambda.1se) # work with the relatively sparse model (c<-predict(lasso.regr,s=bestlam,exact=T,type="coefficients")) (sum(as.vector(c) != 0)) # high probabilities... calibrated? 
# high probabilities... calibrated?
lasso.pred <- predict(lasso.regr, s=bestlam, newx=X[test,], type="response")
plot(lasso.pred, Y[test], col='gray', xlab="Estimated Pr(White)", ylab="True Color")
o <- order(lasso.pred)
x <- lasso.pred[o]
y <- Y[test][o]
r <- loess(y ~ x)
lines(x,fitted(r),col='red'); abline(a=0,b=1,col='gray')
abline(h=0.5, lty=3, col='gray')
i <- which(0.65 < x & x < 0.75)
points(mean(x[i]), mean(y[i]))

confusion_summary(Data[test,"color"]=="White", lasso.pred>0.5)

# ROC
tmp <- roc(lasso.pred, Data[test,"color"])
AUC::auc(tmp)                              # name conflict with glmnet
plot(tmp)

# look at coefficients (word cloud)
c <- predict(lasso.regr, s=bestlam, exact=T, type="coefficients")
typeof(c)
attributes(c)

coef_cloud <- function(coefs, color=F) {
    require(wordcloud)
    require(stringr)
    v  <- as.vector(coefs)
    nm <- str_split_fixed(attributes(coefs)$Dimnames[[1]],"_",2)[,2]
    wordcloud(nm[-1], abs(v[-1])/max(abs(v[-1])) * 100, random.color=color)
}

coef_cloud(c, TRUE)

# --- not much need, but we can also use a more modern classifier

require(tree)
?tree
?tree.control

lastColumn <- 20
tree.fit <- tree("color ~ .", data=Data[,1:lastColumn], subset=train)
summary(tree.fit)
tree.fit                                   # printed summary (see ISL)
plot(tree.fit); text(tree.fit, pretty=0, cex=0.5)

tree.pred <- predict(tree.fit, Data[test,1:lastColumn], type="class")
CrossTable(Data[test,"color"], tree.pred, prop.t=F, prop.chisq=F)
confusion_summary(Data[test,"color"]=="Red", tree.pred=="Red")

# try pruning the tree
set.seed(34)
cvTree <- cv.tree(tree.fit, FUN=prune.misclass)
names(cvTree)                              # k is the penalty term, analogous to lambda in the lasso
cvTree
par(mfrow=c(1,2))
plot(cvTree$size, cvTree$dev)              # dev is really the number of misclassifications
plot(cvTree$k,    cvTree$dev, log="x")     # k ≈ lambda
reset()

# the tree fit is highly discrete (this code is a hack for sure)
tree.phat <- predict(tree.fit, Data[test,], type="vector")
yRed <- as.numeric(Data[test,"color"]=="Red")            # 0/1 so group means are proportions
plot(tree.phat[,"Red"], yRed, xlab="Tree Fit")
m <- tapply(yRed, as.factor(tree.phat[,"Red"]), mean)
points(as.numeric(names(m)), m, col="red")
abline(a=0,b=1,col='gray')

# --- boosted tree
require(gbm)

# gbm likes a 0/1 layout for the response
lastColumn <- 10
UseData <- Data[,2:lastColumn]                           # remove Red/White; replace with an indicator
UseData$WR <- ifelse(Data[,"color"]=="White", 1, 0)

# this would not run unless I defined the formula explicitly (could also use 'multinomial')
set.seed(331)
model <- as.formula("WR ~ alcohol+vintage+price+points")

# shrinkage = learning rate
bTree <- gbm(model, data=UseData[train,], n.trees=400, cv.folds=5, shrinkage=.005,
             interaction.depth=3, distribution='bernoulli', keep.data=T)
##### Error in object$var.levels[[i]] : subscript out of bounds -- error if use bernoulli
names(bTree)
bTree$n.minobsinnode                     # defaults to 10
pretty.gbm.tree(bTree, i.tree = 1)       # details about the component trees

plot(bTree$cv.error)                     # need more, so increase the learning rate

# increase the learning rate (ideally would just run longer)
bTree2 <- gbm(model, data=UseData[train,], n.trees=400, cv.folds=5, shrinkage=.020,
              interaction.depth=3, distribution='bernoulli')
plot(bTree$cv.error, type='l', col='gray', ylim=range(bTree2$cv.error))
lines(bTree2$cv.error)

# increase the learning rate further
bTree3 <- gbm(model, data=UseData[train,], n.trees=400, cv.folds=5, shrinkage=.100,
              interaction.depth=3, distribution='bernoulli')
plot(bTree$cv.error, type='l', col='gray', ylim=range(bTree3$cv.error))
lines(bTree2$cv.error)
lines(bTree3$cv.error, col='red')

btProb <- drop(predict(bTree3, newdata=UseData[test,], n.trees=400, type="response"))
head(btProb)

# nicely calibrated?
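# (aside, not in the original) a reusable sketch of the calibration check this script builds by
# hand several times: scatter the 0/1 outcome against the predicted probability and overlay a
# loess smooth plus the 45-degree reference line. Assumes prob and y are equal-length vectors.
calibration_plot <- function(prob, y, span=0.5, ...) {
    o <- order(prob)
    plot(prob, y, col='gray', ...)
    abline(a=0, b=1, col='gray')                              # perfectly calibrated reference
    lines(prob[o], fitted(loess(y[o] ~ prob[o], span=span)), col='red', lwd=2)
    invisible(NULL)
}
# e.g. calibration_plot(btProb, UseData[test,"WR"], xlab="Boosted Prediction", ylab="True Color")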
y <- UseData[test,"WR"]
plot(btProb, y, xlab="Boosted Prediction", col='gray')
o <- order(btProb)                       # order by the boosted probabilities (not the earlier 'pred')
x <- btProb[o]
y <- y[o]
r <- loess(y ~ x)
lines(x,fitted(r),col='red'); abline(a=0,b=1,col='gray')

btClass <- ifelse(btProb > 0.5, "White", "Red")
tab <- table(UseData[test,"WR"], btClass); tab
confusion_summary(Data[test,"color"]=="Red", btClass=="Red")

#---------------------------------------------------------------------------
#
# Distinguishing red varieties
#
#---------------------------------------------------------------------------

distinguish.varieties.multinomial <- function() {

    require(glmnet)                      # for the multinomial model

    # --- build unified data frame as in the prior example
    #     oovDTM is assumed to be in the workspace (document-term matrix from the earlier text processing)
    counts <- as.matrix(oovDTM)
    freq   <- colSums(counts)
    counts <- counts[,order(freq,decreasing=T)]
    colnames(counts) <- paste("w_",colnames(counts),sep="")
    lengths <- rowSums(counts)
    props   <- counts / lengths          # or sqrt length -or- just counts -or- just indicators
    sum(props[1,])

    # identify the most common red varieties
    tab <- table(Wine$variety)
    sort(tab[tab>500],dec=T)
    reds <- c("Cabernet Sauvignon", "Merlot", "Pinot Noir", "Zinfandel")
    rows <- Wine$variety %in% reds
    sum(rows); head(rows)

    # build unified data set for analysis; fill missing data before joining to the text
    Data <- Wine[rows,c("alcohol","vintage","price")]
    dim(Data)
    apply(Data,2,function(x) sum(is.na(x)))
    Data <- fill.missing(Data)           # use just these varieties to fill
    dim(Data)
    Data <- data.frame(variety=Wine[rows,"variety"], Data,
                       lengths=lengths[rows], props[rows,])
    dim(Data)                            # 4906 x 2667
    colnames(Data)[1:10]

    # --- split into test and train data (250 of each variety in the test set)
    set.seed(84)
    test <- c()
    for (v in reds) test <- c(test, sample(which(Data$variety == v), 250))
    table(Data$variety[test])
    train <- (1:nrow(Data))[-test]
    length(train)                        # 3906

    # --- fit a multinomial regression, using the lasso variation to select a subset
    Y <- Data[,"variety"]                # glmnet uses an explicit X and Y, not a formula

    # first a basic model
    others  <- "alcohol+vintage+price+lengths+Miss.alcohol+Miss.vintage+Miss.price"
    n.words <- 20                        # 20 then 40
    words   <- paste(colnames(counts)[1:n.words],collapse="+")
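    # (aside, not in the original) quick sanity check that every term named in 'others' exists in
    # Data; fill.missing is assumed to have created the Miss.* indicator columns used above
    setdiff(str_split(others, "\\+")[[1]], colnames(Data))    # should print character(0)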
    # model matrix with only the few 'regular' variables...
    model <- as.formula(paste("~",others,collapse=""))
    X <- model.matrix(model, data=Data)[,-1]                  # drop the intercept
    dim(X)

    multinom.regr <- glmnet(X[train,], Y[train], family='multinomial', alpha=1)
    par(mfrow=c(2,2))                                         # need room for 4 plots
    plot(multinom.regr)
    reset()

    # inspect coefficients
    coef_table <- function(coefs, rownames, rnd=4) {
        c <- do.call(cbind, lapply(coefs, as.vector)); rownames(c) <- rownames
        return (round(c,rnd))
    }

    names(multinom.regr)
    coef  <- predict(multinom.regr, s=0, exact=T, type="coefficients")   # coefficients, s = lambda
    names <- c("Intercept", str_split(others,"\\+")[[1]])
    round(coef_table(coef,names),4)
    table(Data[,"Miss.alcohol"], Data[,"variety"])                       # odd large effect

    coef_table(predict(multinom.regr, s=.005, exact=T, type="coefficients"), names)  # some zeroed
    coef_table(predict(multinom.regr, s=.010, exact=T, type="coefficients"), names)  #
    coef_table(predict(multinom.regr, s=.050, exact=T, type="coefficients"), names)  # most zeroed

    # use 10-fold CV to pick the tuning parameter lambda (aka s)
    set.seed(6)
    cv.out <- cv.glmnet(X[train,], Y[train], family='multinomial', alpha=1)
    plot(cv.out)
    (bestlam <- cv.out$lambda.1se)
    (cv.out$lambda.min)

    # get estimated class probabilities, then the class with the largest probability
    multinom.pred <- predict(multinom.regr, s=bestlam, newx=X[test,], type='response')   # probabilities
    dim(multinom.pred)
    multinom.pred <- drop(multinom.pred)          # remove the extraneous dimension
    dim(multinom.pred)                            # now a 4-column matrix
    head(multinom.pred)                           # probabilities sum to 1
    head(rowSums(multinom.pred))

    # check calibration of the probabilities
    v <- "Merlot"    # "Pinot Noir" "Zinfandel" "Cabernet Sauvignon" "Merlot"
    yHat <- multinom.pred[,v]
    y <- ifelse(Y[test]==v,1,0)                   # 0/1 for the plot
    o <- order(yHat)                              # yuck
    y <- y[o]; yHat <- yHat[o]
    plot(y ~ yHat, xlab=paste("Estimated Probability",v), ylab=v)
    abline(a=0,b=1,col='gray')
    f <- loess(y~yHat, span=.95)
    lines(yHat, fitted(f), col='magenta')

    # not very well calibrated except for Merlot
    i <- which(0.28 < yHat & yHat < 0.32); length(i)
    points(mean(yHat[i]), mean(y[i]))
    i <- which(0.38 < yHat & yHat < 0.42); length(i)
    points(mean(yHat[i]), mean(y[i]))
    i <- which(0.48 < yHat & yHat < 0.52); length(i)
    points(mean(yHat[i]), mean(y[i]))

    # class predictions (class with the highest probability)
    multinom.pred <- predict(multinom.regr, s=bestlam, newx=X[test,], type='class')
    tab <- table(Y[test], multinom.pred); tab

    # why is Zinfandel more identifiable?
    coef <- predict(multinom.regr, s=bestlam, exact=T, type="coefficients")   # most zeroed
    coef_table(coef,names)

    #############
    # --- now a wordy multinomial model

    n.words <- 400                                # use 100+ words (gets slower with 400)
    words <- paste(colnames(counts)[1:n.words],collapse="+")
    model <- as.formula(paste("~",others,"+",words,collapse=""))
    X <- model.matrix(model, data=Data)[,-1]      # drop the intercept
    dim(X)                                        # 107, 207 ...
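    # (aside, not in the original) with hundreds of word columns the dense model matrix gets large;
    # glmnet also accepts sparse matrices, so the same step could use Matrix::sparse.model.matrix.
    # Just a sketch; the dense X above is what the rest of this section uses.
    require(Matrix)
    Xs <- sparse.model.matrix(model, data=Data)[,-1]
    dim(Xs)                                       # same columns as X, stored sparsely
    # multinom.regr <- glmnet(Xs[train,], Y[train], family='multinomial', alpha=1)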
    multinom.regr <- glmnet(X[train,], Y[train], family='multinomial', alpha=1)
    par(mfrow=c(2,2))
    plot(multinom.regr)
    reset()

    # look at coefficients (tables and clouds)
    c <- predict(multinom.regr, s=.010, exact=T, type="coefficients")    # zeros out many

    show_coefs <- function(coefs) {
        m <- mapply(as.vector, coefs)
        rownames(m) <- attributes(coefs[[1]])$Dimnames[[1]]              # complex structure
        colnames(m) <- attributes(coefs)$names
        keep <- rowSums(abs(m)) > 0
        return (m[keep,])
    }

    mat <- show_coefs(c)
    dim(mat)
    round(mat,3)

    # pick lambda by 10-fold CV
    set.seed(663)
    cv.out <- cv.glmnet(X[train,], Y[train], family='multinomial', alpha=1)
    plot(cv.out)
    (bestlam <- cv.out$lambda.1se)
    (cv.out$lambda.min)

    coef <- predict(multinom.regr, s=bestlam, exact=T, type="coefficients")
    dim(coef)
    c <- show_coefs(coef)
    round(c,3)

    coef_clouds <- function(coefs) {
        require(wordcloud)
        require(stringr)
        mat <- mapply(as.vector, coefs)
        nm  <- str_split_fixed(attributes(coefs[[1]])$Dimnames[[1]],"_",2)[,2]
        labels <- attributes(coefs)$names
        par(mfrow=c(2,ceiling(ncol(mat)/2)))
        for(i in 1:ncol(mat)) {
            wordcloud(nm[-1], abs(mat[-1,i])/max(abs(mat[-1,i])) * 100)
        }
        labels
    }

    reset()
    coef_clouds(coef)

    multinom.prob <- drop(predict(multinom.regr, s=bestlam, newx=X[test,], type='response'))   # probabilities
    dim(multinom.prob)
    multinom.class <- predict(multinom.regr, s=bestlam, newx=X[test,], type='class')           # highest class
    tab <- table(Data[test,"variety"], multinom.class); tab
    #                       multinom.class (run with 100 words)
    #                       Cabernet Sauvignon Merlot Pinot Noir Zinfandel
    #   Cabernet Sauvignon                 191     31         22         6
    #   Merlot                              61    133         47         9
    #   Pinot Noir                          61     34        145        10
    #   Zinfandel                           84     30         33       103
    #
    #                       multinom.class
    #                       Cabernet Sauvignon Merlot Pinot Noir Zinfandel
    #   Cabernet Sauvignon                 195     33         17         5
    #   Merlot                              74    134         35         7
    #   Pinot Noir                          64     43        136         7
    #   Zinfandel                           86     30         28       106
}

# --- try trees as a competitor

distinguish.varieties.tree <- function() {

    require(tree)
    require(gbm)                         # for boosting

    dim(counts); n.words <- ncol(counts) # use them all!
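    # (aside, not in the original) pasting the full vocabulary into the formula below makes it very
    # long; an alternative sketch would keep only words appearing in some minimum number of reviews
    # (commonWords is a name introduced here, not used elsewhere in the script):
    commonWords <- colnames(counts)[colSums(counts > 0) >= 20]
    length(commonWords)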
    words <- paste(colnames(counts)[1:n.words], collapse="+")
    model <- as.formula(paste("variety ~","lengths+",words,collapse=""))   # the response must be the category

    myTree <- tree(model, data=Data, subset=train)
    summary(myTree)                      # uses very few words
    plot(myTree); text(myTree, pretty=0, cex=0.7)

    # get rid of some "special" words
    i <- which(colnames(counts) == "w_pinot")
    j <- which(colnames(counts) == "w_zin")
    k <- which(colnames(counts) == "w_merlot")
    l <- which(colnames(counts) == "w_noir")    # this one too
    subCounts <- counts[,-c(i,j,k,l)]
    dim(subCounts)

    words <- paste(colnames(subCounts), collapse="+")
    model <- as.formula(paste("variety ~","lengths+",words,collapse=""))
    myTree <- tree(model, data=Data, subset=train)
    summary(myTree)                      # still uses very few words
    plot(myTree); text(myTree, pretty=0, cex=0.7)
    myTree

    # prune the tree (CV finds little to trim)
    set.seed(3423)
    cvTree <- cv.tree(myTree, FUN=prune.misclass)              # minimize the misclassification rate
    # par(mfrow=c(1,2))
    # plot(cvTree$size, cvTree$dev, type="b", cex.lab=2)       # terminal leaves
    # plot(cvTree$k   , cvTree$dev, type="b", cex.lab=2)       # k (complexity parameter)
    # par(mfrow=c(1,1))
    # pruneTree <- prune.misclass(myTree, best=XX)             # trim back to the best size found by CV

    # confusion matrix for the tree
    tree.class <- predict(myTree, Data[test,], type="class")
    tab <- table(Data[test,"variety"], tree.class); tab
    #                       Cabernet Sauvignon Merlot Pinot Noir Zinfandel
    #   Cabernet Sauvignon                 129     49         64         8
    #   Merlot                              16    154         67        13
    #   Pinot Noir                           4     71        149        26
    #   Zinfandel                            9     51         92        98

    # --- boosting allows trees to find more structure
    require(gbm)

    # shrinkage = learning rate
    bTree <- gbm(model, data=Data[train,], n.trees=400, cv.folds=5, shrinkage=.001,
                 interaction.depth=3, distribution='multinomial')
    names(bTree)
    bTree$n.minobsinnode                 # defaults to 10
    pretty.gbm.tree(bTree, i.tree = 1)   # details about the component trees

    plot(bTree$cv.error)                 # need more, so increase the learning rate

    # increase the learning rate (ideally would just run longer)
    bTree2 <- gbm(model, data=Data[train,], n.trees=400, cv.folds=5, shrinkage=.005,
                  interaction.depth=3, distribution='multinomial')
    plot(bTree$cv.error, type='l', col='gray', ylim=range(bTree2$cv.error))
    lines(bTree2$cv.error)

    # increase the learning rate again (ideally would just run longer)
    bTree3 <- gbm(model, data=Data[train,], n.trees=400, cv.folds=5, shrinkage=.010,
                  interaction.depth=3, distribution='multinomial')
    plot(bTree$cv.error, type='l', col='gray', ylim=range(bTree3$cv.error))
    lines(bTree2$cv.error, col='gray')
    lines(bTree3$cv.error)

    bt.prob  <- drop(predict(bTree3, newdata=Data[test,], n.trees=400, type="response"))
    bt.class <- colnames(bt.prob)[apply(bt.prob,1,which.max)]
    tab <- table(Data[test,"variety"], bt.class); tab
    #                       Cabernet Sauvignon Merlot Pinot Noir Zinfandel
    #   Cabernet Sauvignon                 203     28         17         2
    #   Merlot                              66    156         25         3
    #   Pinot Noir                           46    65        133         6
    #   Zinfandel                            89    37         33        91

    # the boosted trees make use of more features
    s <- summary(bTree3); rownames(s) <- NULL
    s[s$rel.inf>0,]
}
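# (aside, not in the original) a minimal helper for summarizing the confusion tables printed above:
# overall accuracy plus per-class recall (diagonal count over row total). Assumes a square table
# with the true classes in rows and the predicted classes in columns, in the same order.
class_accuracy <- function(tab) {
    list(overall  = sum(diag(tab)) / sum(tab),
         by_class = diag(tab) / rowSums(tab))
}
# e.g. class_accuracy(table(Data[test,"variety"], bt.class))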