rm(list=ls(all=TRUE))
setwd("C:/Users/luigi/Dropbox/TOPIC MODEL/")

library(readtext)
library(quanteda)
library(quanteda.textmodels)
library(cowplot)
library(PerformanceAnalytics)
library(psych)
library(quanteda.textplots)
library(quanteda.textstats)
library(ggplot2)

#########################################################################
#########################################################################
# Creating the corpus of the UK electoral programs 1992, 1997
#########################################################################
#########################################################################

myText <- readtext("Lecture 2/Wordscores manifestos/UK/*.txt",
                   docvarsfrom = "filenames", dvsep = " ",
                   docvarnames = c("Party", "Year"))
str(myText)

# Text pre-processing: let's do some cleaning;
# for example, replace the apostrophe (Quanteda struggles with it...)
myText$text <- gsub("'", " ", myText$text)

testCorpus <- corpus(myText)
summary(testCorpus)

# Let's rename the documents (dropping the .txt extension)
docnames(testCorpus) <- gsub(".txt", "", docnames(testCorpus))
summary(testCorpus)

tok2 <- tokens(testCorpus, remove_punct = TRUE, remove_numbers = TRUE,
               remove_symbols = TRUE, split_hyphens = TRUE,
               remove_separators = TRUE)
tok2 <- tokens_remove(tok2, stopwords("en"))
tok2 <- tokens_wordstem(tok2)

#####################
# Remember! Comparing the results with and w/o stopwords, and with and w/o stemming,
# is always good practice (a minimal sketch of such a check follows the identification examples below)
#####################

myDfm <- dfm(tok2)
topfeatures(myDfm, 20)   # 20 top words

# let's keep only those features with at least 2 characters (to remove, for example, the "s")
myDfm <- dfm_remove(myDfm, min_nchar = 2)
topfeatures(myDfm, 20)   # 20 top words

#########################################################################
#########################################################################
# Using Wordfish
#########################################################################
#########################################################################

# dir indicates which two documents are used for global identification purposes
# (the first document is placed to the left of the second one);
# this matters only for the interpretation of the results (i.e., for the direction of the scores
# along the latent dimension: which ones are positive, which negative), not for the estimation per se

str(myDfm)

# here: LAB 92 to the left of CONS 92
summary(testCorpus)
wfm <- textmodel_wordfish(myDfm, dir = c(3, 1))
summary(wfm)

# here: CONS 92 to the left of LAB 92
wfm2 <- textmodel_wordfish(myDfm, dir = c(1, 3))
summary(wfm2)

# compare with the previous case!
summary(wfm)
summary(wfm2)
cor(wfm$theta, wfm2$theta)

# what happens if you now put LIB 92 to the left of CONS 92?
summary(testCorpus)
wfm3 <- textmodel_wordfish(myDfm, dir = c(5, 1))
summary(wfm3)
# Nothing! LIB 92 already lies on the same side as LAB 92, so the orientation is unchanged
cor(wfm$theta, wfm3$theta)
identical(wfm$theta, wfm3$theta)
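# A minimal sketch of the robustness check suggested above (the object names
# tok_raw, myDfm_raw, wfm_raw are mine, just for illustration): re-build the DfM
# without stopword removal and stemming, re-estimate Wordfish with the same dir,
# and correlate the document positions
tok_raw <- tokens(testCorpus, remove_punct = TRUE, remove_numbers = TRUE,
                  remove_symbols = TRUE, split_hyphens = TRUE,
                  remove_separators = TRUE)
myDfm_raw <- dfm_remove(dfm(tok_raw), min_nchar = 2)
wfm_raw <- textmodel_wordfish(myDfm_raw, dir = c(3, 1))
# a high correlation suggests the scaling is robust to these pre-processing choices
cor(wfm$theta, wfm_raw$theta)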
# Always do some diagnostics!
# A good start for diagnostics is the analysis of the word discrimination parameters.
# Features with large weights (in absolute value) are estimated to lie at the extremes of the dimension

# let's learn how to extract the estimates of the model and save them
# let's start by extracting the features
str(wfm)
words2 <- wfm$features
beta2 <- wfm$beta
psi2 <- wfm$psi
scores_words2 <- data.frame(words2, beta2, psi2)
str(scores_words2)

# as expected, the correlation is negative: the larger psi, the lower beta, and vice versa
cor(scores_words2$beta2, scores_words2$psi2)

# top 40 features for negative beta: renounce, democrat, banks, enterprises, pollutants, nationals
head(scores_words2[order(scores_words2$beta2),], 40)
# top 40 features for positive beta: producers, preservation, efforts, enrich, volunteering
tail(scores_words2[order(scores_words2$beta2),], 40)

# in this case we have just 6 documents, and the meaning of the latent dimension is not very clear
# just from looking at the betas (at least the top 40 features). Perhaps progressive vs. conservative?

# Plot the estimated word positions, also highlighting specific words
textplot_scale1d(wfm, margin = "features",
                 highlighted = c("renounce", "pollutants", "democrat",
                                 "producers", "efforts", "volunteering"),
                 highlighted_color = "red", alpha = 0.3)

# Plot the estimated document positions
summary(testCorpus)
textplot_scale1d(wfm, margin = "documents")
textplot_scale1d(wfm, margin = "documents", groups = docvars(testCorpus, "Party"))
textplot_scale1d(wfm, margin = "documents", groups = docvars(testCorpus, "Year"))

# PLZ NOTE a first important point:
# The Poisson model (on which Wordfish is based) assumes that the variance equals the mean, which is not
# always a fair assumption. When the variance is greater than the mean (overdispersion), a quasi-Poisson
# model, which assumes that the variance is a linear function of the mean, is more appropriate.
# With the option dispersion = "quasipoisson" you estimate your model with an overdispersion parameter
# (which adjusts the variance independently of the mean); the dispersion_level option controls whether
# it varies by feature or is a single overall parameter.
# Until a few years ago, everyone used the Poisson model when estimating Wordfish to save computation time.
# Nowadays we face fewer constraints in this regard
wfmQP <- textmodel_wordfish(myDfm, dir = c(3, 1), dispersion = "quasipoisson")
summary(wfmQP)

# The correlations of both the thetas and the betas are pretty high!
cor(wfm$theta, wfmQP$theta)
cor(wfm$beta, wfmQP$beta)

# PLZ NOTE a second important point:
# Wordfish in the Quanteda package implements asymptotic standard errors. These SEs, however, rely heavily
# on the model being correctly specified (a heroic assumption when dealing with text analysis: remember the
# First Principles!). As a way of obtaining uncertainty estimates under weaker assumptions, Lowe and Benoit
# (2013) also introduced a bootstrap procedure that re-estimates the model on different (bootstrapped)
# samples of the original DfM and then averages the results. The Quanteda package supplies functionality for
# random sampling [dfm_sample], which can be used to implement the above bootstrap procedure with relative
# ease. If you are interested in it, just drop me an email!
# And check the EXTRA script on-line on the course homepage for Lab 2
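# A minimal sketch of that bootstrap idea (assuming the objects above; instead of dfm_sample,
# this resamples each document's token counts from a multinomial, keeping document lengths fixed;
# nboot is kept small here purely for speed, use many more replicates in practice):
set.seed(123)
nboot <- 10
mat <- as.matrix(myDfm)
boot_thetas <- replicate(nboot, {
  # redraw each document's word counts with replacement (rmultinom normalizes prob internally)
  boot_mat <- t(apply(mat, 1, function(row) rmultinom(1, size = sum(row), prob = row)))
  colnames(boot_mat) <- colnames(mat)
  # drop features that drew zero counts in this replicate, then re-estimate Wordfish
  boot_dfm <- dfm_trim(as.dfm(boot_mat), min_termfreq = 1)
  textmodel_wordfish(boot_dfm, dir = c(3, 1))$theta
})
# bootstrap standard errors of the document positions
apply(boot_thetas, 1, sd)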
#########################################################################
#########################################################################
# Using Wordfish: US presidential inaugural speeches after 1980
#########################################################################
#########################################################################

# apply Wordfish by first considering Reagan 1981 to the right of Obama 2009,
# and then Trump 2017 to the right of Obama 2009: any change?

corpus_Pres <- corpus_subset(data_corpus_inaugural, Year > 1980)
tok2 <- tokens(corpus_Pres, remove_punct = TRUE, remove_numbers = TRUE,
               remove_symbols = TRUE, split_hyphens = TRUE,
               remove_separators = TRUE)
tok2 <- tokens_remove(tok2, stopwords("en"))
tok2 <- tokens_wordstem(tok2)
presDfm <- dfm(tok2)
presDfm[, 1:10]
str(presDfm)
docnames(presDfm)

# Obama 2009 to the left of Reagan 1981
wfm <- textmodel_wordfish(presDfm, dir = c(8, 1))
summary(wfm)

words2 <- wfm$features
beta2 <- wfm$beta
psi2 <- wfm$psi
scores_words2 <- data.frame(words2, beta2, psi2)
str(scores_words2)

# top 40 words for negative beta: tyranni; ideolog; murder; tyrant; unlimit
head(scores_words2[order(scores_words2$beta2),], 40)
# top 40 words for positive beta: rainbow; western; sovereignti; cheer
tail(scores_words2[order(scores_words2$beta2),], 40)

# meaning of the latent dimension: the pessimistic vs. the optimistic side of power?
# for us, the latent dimension does not appear to be related to ideology! As you can
# also see below when you plot the documents (face validity)

# Highlight specific words
textplot_scale1d(wfm, margin = "features",
                 highlighted = c("tyranni", "murder", "mortal",
                                 "cheer", "rainbow", "sovereignti"),
                 highlighted_color = "red", alpha = 0.3)

# Plot the estimated document positions
textplot_scale1d(wfm, margin = "documents")

# Obama 2009 to the left of Trump 2017
wfm2 <- textmodel_wordfish(presDfm, dir = c(8, 10))
summary(wfm2)

# Plot the estimated document positions
textplot_scale1d(wfm2, margin = "documents")

# check the correlation (see also the visual check sketched at the end of this script)
score_reagan <- wfm$theta
score_trump <- wfm2$theta
cor(score_reagan, score_trump)
identical(score_reagan, score_trump)

# PLZ NOTE: if you are interested in Wordshoal, check the EXTRA script on-line on the course
# homepage for Lab 2
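# Finally, a minimal visual check of the dir comparison above (assuming the objects
# score_reagan, score_trump, and presDfm just created): plot the two sets of document
# positions against each other; points falling on the 45-degree line would indicate
# that the two identification choices yield the same estimates
plot(score_reagan, score_trump,
     xlab = "Theta (Obama 2009 left of Reagan 1981)",
     ylab = "Theta (Obama 2009 left of Trump 2017)")
abline(0, 1, lty = 2)   # 45-degree reference line
text(score_reagan, score_trump, labels = docnames(presDfm), pos = 3, cex = 0.7)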