Need help with this question or something similar? We can help! Just fill out the order form (follow the link below), and your paper will be assigned to an expert who will assist you as soon as possible.
# Question (from the original post): "I am conducting a sentiment analysis on
# Reddit and when I run the code, none of the bar charts are showing up in the
# plot window. Only the wordclouds work. What am I doing wrong?"
#
# Answer: a ggplot object is only drawn when it is PRINTED. At the top level of
# an interactive session auto-printing happens for you, but when code is run
# via source(), inside a function, a loop, or an RMarkdown chunk, bare
# expressions like `comments_graph` are not auto-printed — so the bar charts
# never appear. Base-graphics functions such as wordcloud() draw as a side
# effect, which is why only the wordclouds showed up. Wrap every ggplot in
# print(), as done below. The original paste also used curly "smart quotes"
# (“ ”), which R cannot parse; all quotes below are plain ASCII.

# Install once, interactively — not on every script run:
# install.packages(c("RedditExtractoR", "wordcloud", "tidyverse", "tidytext",
#                    "readtext", "tm", "reshape2"))
library(RedditExtractoR)
library(wordcloud)   # for visualizing wordclouds
library(tidyverse)   # loads dplyr, stringr, tidyr, ggplot2, tibble, ...
library(tidytext)    # text mining (unnest_tokens, stop_words, get_sentiments)
library(readtext)    # for reading in txt files
library(tm)
library(reshape2)

# Get the links ----
# NOTE(review): hits the Reddit API over the network; results vary by run.
links <- find_thread_urls(keywords = "Pepsi Nitro", sort_by = "top")
View(links)

# Check the attributes
str(links)

# Extract thread contents ----
threads_contents <- get_thread_content(links$url)
str(threads_contents$threads)    # thread metadata
str(threads_contents$comments)
comments <- threads_contents[["comments"]]

# Clean the data ----
comments_only <- comments %>% select(comment)
tidy_comments <- comments_only %>%
  filter(comment != "[deleted]") %>%
  filter(comment != "[removed]") %>%
  filter(!str_detect(comment, "your comment has been removed"))

# Tokenize into one word per row, then drop standard stopwords.
tidy_comments2 <- tidy_comments %>%
  unnest_tokens(output = word, input = comment)
tidy_comments3 <- anti_join(tidy_comments2, stop_words, by = "word")

# Domain-specific noise tokens to drop on top of stop_words.
my_stopwords <- c("https", "http", "gt", "amp", "1", "2", "3", "10", "don",
                  "www.reddit.com", "x200b", "i.imgur.com")
tidy_comments4 <- tidy_comments3 %>% filter(!word %in% my_stopwords)

# Remove tokens that start with a digit.
# BUG FIX: the original used tidy_comments4[-grep("^[0-9]+", ...), ], which
# returns ZERO rows when grep() finds no match (-integer(0) selects nothing).
# filter(!str_detect(...)) is safe in both cases.
tidy_comments5 <- tidy_comments4 %>% filter(!str_detect(word, "^[0-9]+"))

# BUG FIX: the original did tibble(text = tidy_comments), embedding a whole
# data frame as a single column; we want the word vector itself.
comments_clean <- tibble(text = tidy_comments5$word)

# Count frequent words ----
comments_freq <- comments_clean %>% count(text, sort = TRUE)

# Bar graph for frequent words ----
comments_graph <- comments_freq %>%
  filter(n > 250) %>%
  mutate(text = reorder(text, n)) %>%
  ggplot(aes(text, n)) +
  geom_col() +
  xlab(NULL) +
  coord_flip()
# print() is what actually draws the chart when this file is sourced.
print(comments_graph)

# Wordcloud (base graphics — draws immediately, no print() needed) ----
comments_freq %>%
  with(wordcloud(unique(text), n, min.words = 1, max.words = 250,
                 random.order = FALSE, color = c("blue", "red"),
                 scale = c(3, 0.3)))

# Get the sentiment dictionaries ----
get_sentiments("afinn")
get_sentiments("bing")
get_sentiments("nrc")

# Extract the sentiments ----
Sentiments <- comments_clean %>%
  unnest_tokens(word, text) %>%
  anti_join(stop_words, by = "word") %>%
  inner_join(get_sentiments("bing"), by = "word")
print(Sentiments)

# Group and count the total number of sentiments in the comments
Sentiments %>%
  count(sentiment, sort = TRUE)

# Comparison wordcloud for the sentiments ----
wordcloudData <- comments_clean %>%
  select(text) %>%
  unnest_tokens(output = word, input = text) %>%
  anti_join(stop_words, by = "word") %>%
  inner_join(get_sentiments("bing"), by = "word") %>%
  count(sentiment, word, sort = TRUE) %>%
  # pivot_wider() replaces the superseded spread().
  pivot_wider(names_from = sentiment, values_from = n, values_fill = 0) %>%
  data.frame()
rownames(wordcloudData) <- wordcloudData[, "word"]
wordcloudData2 <- wordcloudData[, c("positive", "negative")]
set.seed(617)
comparison.cloud(term.matrix = wordcloudData2, scale = c(2, 0.5),
                 max.words = 100, rot.per = 0)

# Bar graphs for sentiments ----
## Positive sentiments — wrapped in print() so it renders when sourced.
print(
  wordcloudData %>%
    filter(positive > 70) %>%
    ggplot(aes(x = positive, y = reorder(word, positive), fill = positive)) +
    geom_col(show.legend = FALSE) +
    labs(
      x = "Frequency",
      y = "Word",
      title = "Top Positive sentiments in the comments"
    ) +
    theme_minimal() +
    theme(axis.text.x = element_text(angle = 45))
)

## Negative sentiments
# NOTE(review): the original paste was cut off after `theme_minimal() +`;
# completed here to mirror the positive-sentiment plot — confirm intent.
print(
  wordcloudData %>%
    filter(negative > 70) %>%
    ggplot(aes(x = negative, y = reorder(word, negative), fill = negative)) +
    geom_col(show.legend = FALSE) +
    labs(
      x = "Frequency",
      y = "Word",
      title = "Top Negative sentiments in the comments"
    ) +
    theme_minimal() +
    theme(axis.text.x = element_text(angle = 45))
)
Requirements: as needed | .doc file