##--------------------------------------------------------------------------------------
##
## Federalist papers, naive Bayes
##
## note: the source text encodes some punctuation with special non-ASCII
## glyphs -- one character each for colon, semicolon, comma, and period.
## (the glyphs themselves did not survive in this copy of the script.)
##
##--------------------------------------------------------------------------------------
setwd("~/courses/mich/text_analytics/")
source("R_scripts/text_utils.R")

# --- load the data from file
papers <- read.csv("~/data/text/federalist/federalist.csv", header = TRUE)

# --- structure of data
dim(papers); names(papers)

# --- authors: recode the two we care about, leave the rest NA
table(papers$author)
papers$auth <- rep(NA, nrow(papers))
papers$auth[papers$author == "HAMILTON"] <- "Hamilton"
papers$auth[papers$author == "MADISON" ] <- "Madison"

# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# LSA-style preprocessing: build the corpus and term/document matrix
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

# load the text-mining packages
library(tm)
library(stringr)

# convert the papers into a corpus
v <- as.vector(as.character(papers$text))
fed.corpus <- Corpus(VectorSource(v))
fed.corpus
str_sub(fed.corpus[[1]]$content, 1, 200)

# clean up (tokenize)
fed.corpus <- tm_map(fed.corpus, content_transformer(tolower))
fed.corpus <- tm_map(fed.corpus, removeNumbers)
# the original also listed a few of the stray non-ASCII glyphs here (the
# characters were lost in this copy); removeWords would not strip them
# anyway -- the gsub calls below handle stray characters
fed.corpus <- tm_map(fed.corpus, removeWords, c(stopwords("english"), "\n"))
str_sub(fed.corpus[[1]]$content, 1, 200)
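# why the gsub calls below rather than removeWords?  removeWords only
# matches whole word tokens (it wraps its patterns in \b...\b), so
# punctuation glyphs embedded in the text slip through; gsub substitutes
# at the character level.  a toy check (my illustration, not part of the
# original script):
removeWords("the man, the plan", stopwords("english"))   # " man,  plan"  -- the comma survives
gsub(",", " ", "the man, the plan")                      # "the man  the plan"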
","","")) # for whatever reason, removeWords won't remove these str_sub(fed.corpus[[1]]$content, 1, 200) fed.corpus <- tm_map(fed.corpus, content_transformer(function(s) gsub(""," ",s))) fed.corpus <- tm_map(fed.corpus, content_transformer(function(s) gsub("

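# an aside (my addition, not in the original): the DTM built next uses
# raw counts; tm can also weight terms by tf-idf, which the "think tfidf"
# remark in the word-selection step further down gestures at
# DTM.tfidf <- DocumentTermMatrix(fed.corpus, control = list(weighting = weightTfIdf))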
"," ",s))) fed.corpus <- tm_map(fed.corpus, content_transformer(function(s) gsub(""," ",s))) fed.corpus <- tm_map(fed.corpus, content_transformer(function(s) gsub(""," ",s))) fed.corpus <- tm_map(fed.corpus, content_transformer(function(s) gsub(""," ",s))) fed.corpus <- tm_map(fed.corpus, content_transformer(function(s) gsub(""," ",s))) fed.corpus <- tm_map(fed.corpus, content_transformer(function(s) gsub("\\)"," ",s))) fed.corpus <- tm_map(fed.corpus, content_transformer(function(s) gsub("\\("," ",s))) fed.corpus <- tm_map(fed.corpus, content_transformer(function(s) gsub("%"," ",s))) fed.corpus <- tm_map(fed.corpus, stripWhitespace) inspect(fed.corpus[1]) # other tools (skip these) # fed.corpus <- tm_map(fed.corpus, stemDocument) # fed.corpus <- tm_map(fed.corpus, removePunctuation) # build term/document matrix (no weighting here) DTM <- DocumentTermMatrix(fed.corpus); DTM inspect(DTM[1:5,1:5]) # you can try stemming! # queries findFreqTerms(DTM, 800) findAssocs(DTM, "united", 0.6) findAssocs(DTM, "ambiguity", 0.6) findAssocs(DTM, "plurality", 0.6) findAssocs(DTM, "may", 0.5) counts <- as.matrix(DTM) freq <- colSums(counts) o <- order(freq, decreasing=T) freq <- freq[o] counts <- counts[,o] ##################################################################### # # Simple version of naive Bayes (omit probability smoothing) # ##################################################################### # which words are present in some, but not others? (limit to Hamilton/Madison documents) use <- which(papers$auth=="Hamilton" | papers$auth=="Madison") length(use) # 65 useCounts <- counts[use,] author <- papers$auth[use] wordAppearsDoc <- useCounts > 0 wordAppearsTotal <- colSums(wordAppearsDoc) # pick words used in at least 20, but not more than 40 (why... think tfidf) plot(wordAppearsTotal) words <- (20 <= wordAppearsTotal) & (wordAppearsTotal <= 40) # 357 words, but only 65 docs words[1:40] # too common to discriminate hamPaper <- author=="Hamilton" madPaper <- author=="Madison" wordAppearsHamTotal <- colSums(wordAppearsDoc[hamPaper,words]) wordAppearsMadTotal <- colSums(wordAppearsDoc[madPaper,words]) head(wordAppearsHamTotal) head(wordAppearsMadTotal) pWord.Ham <- wordAppearsHamTotal/sum(hamPaper) plot(pWord.Ham); range(pWord.Ham) # sanity check again! pWord.Mad <- wordAppearsMadTotal/sum(madPaper) plot(pWord.Mad); range(pWord.Mad) sum(pWord.Mad==0) pWord.Mad[pWord.Mad==0] <- 1/14 # very clumsy smoothing... but only 2 # --- try it out for paper with known author (i <- which(hamPaper)[1]) # (i <- which(madPaper)[1]) scoreHam <- sum(log(pWord.Ham[wordAppearsDoc[i,words]])) scoreMad <- sum(log(pWord.Mad[wordAppearsDoc[i,words]])) c(scoreHam, scoreMad) # A hamilton paper (i=1) or madison (i=6) # --- for a paper of unknown authorship? 49-60 paper <- 53 wordAppears <- counts[paper,words]>0 scoreHam <- sum(log(pWord.Ham[wordAppears])) scoreMad <- sum(log(pWord.Mad[wordAppears])) c(scoreHam, scoreMad)