Collecting, Analyzing and Presenting data about participation in #ilovefs day

collecto.R (13 KB)

################################################################################
# collecto.R                                                                   #
# Collecting data from different social media sources with a specified        #
# searchterm/criteria                                                         #
# Jan Weymeirsch janwey@fsfe.org                                              #
################################################################################
### Loading Packages {{{ ----
#### Twitter
install.packages("twitteR")
library("twitteR")
# had to install "httr" via packagemanager
#### Fediverse (eg: mastodon)
install.packages("curl")
library("curl")
install.packages("rjson")
library("rjson")
#### Reddit
install.packages("RedditExtractoR")
library("RedditExtractoR")
# }}}
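# Note ---------------------------------------------------------------------
# The install.packages() calls above re-install every package on each run.
# A minimal alternative sketch (not part of the original script) would only
# install what is missing:
#   for(pkg in c("twitteR", "curl", "rjson", "RedditExtractoR")){
#     if(!requireNamespace(pkg, quietly = TRUE)){ install.packages(pkg) }
#   }
# ---------------------------------------------------------------------------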
## Twitter Collector {{{ ----
### Authenticate to Twitter
#### Manual input (uncomment if needed)
#twitter_consumerkey <- readline("[Twitter] Enter your consumer API key.")
#twitter_consumerpri <- readline("[Twitter] Enter your consumer API secret.")
#twitter_tokenaccess <- readline("[Twitter] Enter your Access Token.")
#twitter_tokensecret <- readline("[Twitter] Enter your Token Secret.")
#### Saved credentials
twitter_api_cred <- read.table(file = "./twitter_api.txt", header = TRUE, sep = ";")
twitter_consumerkey <- as.character(twitter_api_cred$consumer_key)
twitter_consumerpri <- as.character(twitter_api_cred$consumer_private)
twitter_tokenaccess <- as.character(twitter_api_cred$access_token)
twitter_tokensecret <- as.character(twitter_api_cred$token_secret)
setup_twitter_oauth(consumer_key    = twitter_consumerkey,
                    consumer_secret = twitter_consumerpri,
                    access_token    = twitter_tokenaccess,
                    access_secret   = twitter_tokensecret)
# Note -------------------------------------------------------------------------
# Please refer to the Documentation on where to receive your API credentials.
# ------------------------------------------------------------------------------
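# Based on the read.table() call above, "./twitter_api.txt" is assumed to be
# a single-row, semicolon-separated table. A minimal example with placeholder
# values (not real credentials):
#   consumer_key;consumer_private;access_token;token_secret
#   XXXXXXXX;XXXXXXXX;XXXXXXXX;XXXXXXXX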
### Collecting Tweets
twitter_tw_dirty <- searchTwitter(searchString = "ilovefs",
                                  since        = "2018-01-01",
                                  until        = "2018-12-31",
                                  n            = 100,
                                  resultType   = "recent")
### Strip off retweets
twitter_tw <- strip_retweets(tweets = twitter_tw_dirty,
                             strip_manual = FALSE,
                             strip_mt = FALSE)
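# With strip_manual = FALSE and strip_mt = FALSE, only API-native retweets
# are dropped; manual "RT @user" copies and modified tweets ("MT") are kept.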
### Extract relevant data from dataset
twitter_timedate <- c()
twitter_client <- c()
twitter_name <- c()
twitter_rts <- c()
twitter_fav <- c()
twitter_url <- c()
twitter_txt <- c()
for(i in 1:length(twitter_tw)){
  #### Time of tweet
  if(length(twitter_tw[[i]]$created) > 0){
    twitter_timedate[i] <- as.character(twitter_tw[[i]]$created)
  } else {
    # insert empty value, if it does not exist
    twitter_timedate[i] <- NA
  }
  #### Client used
  if(length(twitter_tw[[i]]$statusSource) > 0){
    twitter_client[i] <- as.character(twitter_tw[[i]]$statusSource)
  } else {
    # insert empty value, if it does not exist
    twitter_client[i] <- NA
  }
  #### Screen names / Twitter Handles
  if(length(twitter_tw[[i]]$screenName) > 0){
    twitter_name[i] <- as.character(twitter_tw[[i]]$screenName)
  } else {
    # insert empty value, if it does not exist
    twitter_name[i] <- NA
  }
  #### Number of retweets
  if(length(twitter_tw[[i]]$retweetCount) > 0){
    twitter_rts[i] <- as.character(twitter_tw[[i]]$retweetCount)
  } else {
    # insert empty value, if it does not exist
    twitter_rts[i] <- NA
  }
  #### Number of favorites
  if(length(twitter_tw[[i]]$favoriteCount) > 0){
    twitter_fav[i] <- as.character(twitter_tw[[i]]$favoriteCount)
  } else {
    # insert empty value, if it does not exist
    twitter_fav[i] <- NA
  }
  #### URLs posted about
  if(length(twitter_tw[[i]]$urls$expanded_url) > 0){
    # take only the first URL, if a tweet contains several
    twitter_url[i] <- as.character(twitter_tw[[i]]$urls$expanded_url[1])
  } else {
    # insert empty value, if it does not exist
    twitter_url[i] <- NA
  }
  #### Actual tweet/text
  if(length(twitter_tw[[i]]$text) > 0){
    twitter_txt[i] <- as.character(twitter_tw[[i]]$text)
  } else {
    # insert empty value, if it does not exist
    twitter_txt[i] <- NA
  }
}
### Removing HTML-Tags from Client-info
twitter_client <- sub(pattern = ".*\">", replacement = "", x = twitter_client)
twitter_client <- sub(pattern = "</a>", replacement = "", x = twitter_client)
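# Illustration (assumed statusSource format): the two sub() calls reduce
#   '<a href="http://twitter.com" rel="nofollow">Twitter Web Client</a>'
# to
#   'Twitter Web Client'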
### Forming variables for dataframe
time <- sub(pattern = ".* ", x = twitter_timedate, replacement = "")
time <- as.numeric(gsub(pattern = ":", x = time, replacement = ""))
date <- sub(pattern = " .*", x = twitter_timedate, replacement = "")
date <- as.numeric(gsub(pattern = "-", x = date, replacement = ""))
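# Example: a twitter_timedate value of "2018-02-14 13:52:01" becomes
# time = 135201 and date = 20180214 after the substitutions above.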
retw <- as.factor(twitter_rts)
favs <- as.factor(twitter_fav)
link <- as.character(twitter_url)
text <- as.character(twitter_txt)
clnt <- as.character(twitter_client)
### Creating dataframe
twitter <- data.frame(cbind(date, time, retw, favs, text, link, clnt))
#### Clean-Up
rm(list = c("date", "time", "retw", "favs", "text", "link", "clnt"))
twitter <- within(data = twitter, expr = {
  date <- as.numeric(as.character(date));
  time <- as.numeric(as.character(time));
  text <- as.character(text);
  link <- as.character(link);
})
# }}}
## Mastodon Collector with curl {{{ ----
mastodon.fetchdata <- function(data){
  tmp_datetime <- c()
  tmp_lang <- c()
  tmp_inst <- c()
  tmp_link <- c()
  tmp_text <- c()
  tmp_reto <- c()
  tmp_favs <- c()
  tmp_murl <- c()
  for(i in 1:length(data)){
    #### Time and Date of Toot
    if(length(data[[i]]$created_at) > 0){
      tmp_datetime[i] <- data[[i]]$created_at
    } else {
      # insert empty value, if it does not exist
      tmp_datetime[i] <- NA
    }
    #### Language of Toot
    if(length(data[[i]]$language) > 0){
      tmp_lang[i] <- data[[i]]$language
    } else {
      # insert empty value, if it does not exist
      tmp_lang[i] <- NA
    }
    #### Instance of Toot
    if(length(data[[i]]$uri) > 0){
      tmp_inst[i] <- data[[i]]$uri
    } else {
      # insert empty value, if it does not exist
      tmp_inst[i] <- NA
    }
    #### URL of Toot
    if(length(data[[i]]$url) > 0){
      tmp_link[i] <- data[[i]]$url
    } else {
      # insert empty value, if it does not exist
      tmp_link[i] <- NA
    }
    #### Text/Content of Toot
    if(length(data[[i]]$content) > 0){
      tmp_text[i] <- data[[i]]$content
    } else {
      # insert empty value, if it does not exist
      tmp_text[i] <- NA
    }
    #### Number of Retoots
    if(length(data[[i]]$reblogs_count) > 0){
      tmp_reto[i] <- data[[i]]$reblogs_count
    } else {
      # insert empty value, if it does not exist
      tmp_reto[i] <- NA
    }
    #### Number of Favorites
    if(length(data[[i]]$favourites_count) > 0){
      tmp_favs[i] <- data[[i]]$favourites_count
    } else {
      # insert empty value, if it does not exist
      tmp_favs[i] <- NA
    }
    #### URL of (first) media attachment
    if(length(data[[i]]$media_attachments) > 0){
      tmp_murl[i] <- data[[i]]$media_attachments[[1]]$url
    } else {
      # insert empty value, if it does not exist
      tmp_murl[i] <- NA
    }
  }
  return(data.frame(cbind(tmp_datetime,
                          tmp_lang,
                          tmp_inst,
                          tmp_text,
                          tmp_link,
                          tmp_reto,
                          tmp_favs,
                          tmp_murl)))
}
datetime <- c()
lang <- c()
inst <- c()
link <- c()
text <- c()
reto <- c()
favs <- c()
murl <- c()
for(i in 1:10){
  if(i == 1){
    mastodon_instance <- "https://mastodon.social"
    mastodon_hashtag <- "ilovefs"
    mastodon_url <- paste0(mastodon_instance,
                           "/api/v1/timelines/tag/",
                           mastodon_hashtag,
                           "?limit=40")
  } else {
    # pick the "Link" header by name instead of a hard-coded position
    mastodon_headers <- parse_headers(mastodon_reqres$headers)
    mastodon_lheader <- mastodon_headers[grep(pattern = "^link:", x = mastodon_headers, ignore.case = TRUE)]
    mastodon_next <- sub(x = mastodon_lheader, pattern = ".*link: <", replacement = "", ignore.case = TRUE)
    mastodon_url <- sub(x = mastodon_next, pattern = ">; rel=\"next\".*", replacement = "")
  }
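  # Sketch of the HTTP "Link" header that the sub() calls above parse
  # (URL shortened for illustration):
  #   link: <https://mastodon.social/api/v1/timelines/tag/ilovefs?limit=40&max_id=NNN>; rel="next", <...>; rel="prev"
  # i.e. the URL tagged rel="next" is extracted for the next iteration.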
  mastodon_reqres <- curl_fetch_memory(mastodon_url)
  mastodon_rawjson <- rawToChar(mastodon_reqres$content)
  toots <- fromJSON(mastodon_rawjson)
  if(length(toots) > 0){
    tmp_mastodon_df <- mastodon.fetchdata(data = toots)
    datetime <- c(datetime, as.character(tmp_mastodon_df$tmp_datetime))
    lang <- c(lang, as.character(tmp_mastodon_df$tmp_lang))
    inst <- c(inst, as.character(tmp_mastodon_df$tmp_inst))
    link <- c(link, as.character(tmp_mastodon_df$tmp_link))
    text <- c(text, as.character(tmp_mastodon_df$tmp_text))
    reto <- c(reto, as.character(tmp_mastodon_df$tmp_reto))
    favs <- c(favs, as.character(tmp_mastodon_df$tmp_favs))
    murl <- c(murl, as.character(tmp_mastodon_df$tmp_murl))
  } else {
    break
  }
}
### Time of post
#### date (as numeric value)
date <- sub(pattern = "T.*", x = datetime, replacement = "")
date <- gsub(pattern = "-", x = date, replacement = "")
date <- as.numeric(date)
#### time (as numeric value)
time <- sub(pattern = ".*T", x = datetime, replacement = "")
time <- sub(pattern = "\\..*", x = time, replacement = "")
time <- gsub(pattern = ":", x = time, replacement = "")
time <- as.numeric(time)
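# Example: a created_at value like "2018-02-14T13:52:01.000Z" yields
# date = 20180214 and time = 135201 after the substitutions above.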
### Removing HTML-Tags from Toots
text <- gsub(pattern = "<.*?>", x = text, replacement = "")
# strip non-breaking spaces (U+00A0) left over from the HTML content
text <- gsub(pattern = "\u00a0", x = text, replacement = " ")
### Cleaning Instance-String
#### GNUsocial
inst <- sub(pattern = "tag:", x = inst, replacement = "")
inst <- sub(pattern = ",\\d+.*", x = inst, replacement = "")
#### Mastodon
inst <- sub(pattern = "https:\\/\\/", x = inst, replacement = "")
inst <- sub(pattern = "\\/.*", x = inst, replacement = "")
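# Illustration with assumed URI formats:
#   GNUsocial: "tag:quitter.no,2018-02-14:noticeId=1234:objectType=note" -> "quitter.no"
#   Mastodon:  "https://mastodon.social/users/fsfe/statuses/1234"        -> "mastodon.social"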
### Only include Toots from this year
# use positive selection; with "date[-which(...)]" an empty match would
# otherwise drop every element
mastodon_keep <- which(date >= 20180101)
date <- date[mastodon_keep]
time <- time[mastodon_keep]
lang <- lang[mastodon_keep]
inst <- inst[mastodon_keep]
text <- text[mastodon_keep]
link <- link[mastodon_keep]
reto <- reto[mastodon_keep]
favs <- favs[mastodon_keep]
murl <- murl[mastodon_keep]
### Creating dataframe
mastodon <- data.frame(cbind(date, time, lang, inst, text, link, reto, favs, murl))
#### Clean-Up
rm(list = c("date", "time", "lang", "inst", "text", "link", "favs", "reto", "murl", "datetime"))
mastodon <- within(data = mastodon, expr = {
  date <- as.numeric(as.character(date));
  time <- as.numeric(as.character(time));
  text <- as.character(text);
  link <- as.character(link);
  murl <- as.character(murl);
})
# }}}
## Reddit Collector {{{ ----
### Authentication at Reddit
# no authentication necessary, hence we can directly start scraping
### Get posts on Reddit
reddit_post_dirty <- reddit_urls(search_terms = "ilovefs",
                                 #subreddit = "freesoftware linux opensource",
                                 cn_threshold = 0,
                                 page_threshold = 99999,
                                 sort_by = "new",
                                 wait_time = 5)
### Only use posts from the current year
reddit_searchinyear <- 18  # has to have format "YY", eg "18" for "2018"
reddit_post_year <- gsub(x = reddit_post_dirty$date,
                         pattern = "\\d{2}-\\d{2}-",
                         replacement = "")
reddit_post <- reddit_post_dirty[which(reddit_post_year == reddit_searchinyear),]
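# Example: a post date of "14-02-18" (DD-MM-YY) is reduced to "18" by the
# gsub() above and then compared against reddit_searchinyear.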
### Extracting relevant variables
comt <- c()  # Comments / Replies
subr <- c()  # Subreddit
ptns <- c()  # Points / Score
ttle <- c()  # Title
text <- c()  # Text / Content
link <- c()  # Linked to Website
date <- c()  # Date
rurl <- c()  # Reddit-URL of post
for(i in c(1:length(reddit_post$URL))){
  comt[i] <- reddit_post$num_comments[i]
  ttle[i] <- reddit_post$title[i]
  rurl[i] <- reddit_post$URL[i]
  date[i] <- gsub(x = reddit_post$date[i], pattern = "-", replacement = "")
  subr[i] <- reddit_post$subreddit[i]
  # short pause between requests, so we do not hammer the Reddit servers
  Sys.sleep(2)
  # use a separate name, so the reddit_content() function is not shadowed
  # by its own result
  reddit_content_tmp <- reddit_content(URL = reddit_post$URL[i], wait_time = 0)
  ptns[i] <- reddit_content_tmp$post_score[1]
  text[i] <- reddit_content_tmp$post_text[1]
  link[i] <- reddit_content_tmp$link[1]
}
### Creating dataframe
reddit <- data.frame(cbind(date, rurl, link, text, ttle, ptns, subr, comt))
#### Clean-Up
rm(list = c("date", "rurl", "link", "text", "ttle", "ptns", "subr", "comt"))
reddit <- within(data = reddit, expr = {
  date <- as.numeric(as.character(date));
  rurl <- as.character(rurl);
  link <- as.character(link);
  text <- as.character(text);
  ttle <- as.character(ttle);
  ptns <- as.numeric(as.character(ptns));
  subr <- as.character(subr);
  comt <- as.numeric(as.character(comt));
})
# }}}
### Exporting data {{{ ----
# format the current time directly; as.character(Sys.time()) does not carry
# a timezone suffix such as " CET" that could be stripped off
time_of_saving <- format(Sys.time(), format = "%Y-%m-%d_%H-%M-%S")
#### RData
save_path <- paste0("./data/ilovefs-all_", time_of_saving, ".RData")
save(list = c("twitter", "mastodon", "reddit"), file = save_path)
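# Example: saving on 2018-02-14 at 12:34:56 yields
#   ./data/ilovefs-all_2018-02-14_12-34-56.RData
# (the ./data/ directory is assumed to exist already)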
#### Text
##### Fediverse
save_path_fed_t <- paste0("./data/ilovefs-fediverse_", time_of_saving, ".txt")
write.table(mastodon, file = save_path_fed_t)
##### Twitter
save_path_twitter_t <- paste0("./data/ilovefs-twitter_", time_of_saving, ".txt")
write.table(twitter, file = save_path_twitter_t)
##### Reddit
save_path_reddit_t <- paste0("./data/ilovefs-reddit_", time_of_saving, ".txt")
write.table(reddit, file = save_path_reddit_t)
#### CSV
##### Fediverse
save_path_fed_c <- paste0("./data/ilovefs-fediverse_", time_of_saving, ".csv")
write.csv(mastodon, file = save_path_fed_c)
##### Twitter
save_path_twitter_c <- paste0("./data/ilovefs-twitter_", time_of_saving, ".csv")
write.csv(twitter, file = save_path_twitter_c)
##### Reddit
save_path_reddit_c <- paste0("./data/ilovefs-reddit_", time_of_saving, ".csv")
write.csv(reddit, file = save_path_reddit_c)
# }}}