options(digits = 3)
library(ggplot2)
library(plyr)
library(dplyr)
library(quanteda)
library(quanteda.textstats)
library(quanteda.textmodels)
library(quanteda.textplots)
library(topicmodels)
library(stm)
library(tidytext)
library(plotly)
library(lubridate)
library(corpus)
library(stringr)
library(ggrepel)
library(tidyr)
library(igraph)
library(gdata)
library(readtext)
library(tm)
library(tm.plugin.factiva)
library(RNewsflow)
library(readxl)
# Original data cannot be shared; we refer to the names of the original files processed.
# The code runs identically for the German and the Italian corpora, except where specified. Examples below may use either country's documents.
# corpus_de08 = German corpus
# corpus_it08 = Italian corpus
# dfm_de = German document-feature matrix
# dfm_it = Italian document-feature matrix
# tx_de = German processed data (data frame)
# tx_it = Italian processed data (data frame)
# set working directory
setwd("C:/Users/rocpa/OneDrive/Documenti/GitHub/zombiefirms/")
# UTILS FOR BACKGROUND DATA PROCESSING ######
# to check if a term is included in the document-feature matrix #####
textstat_frequency(dfm_de) %>% subset(feature %in% "europäische_zentralbank")
# to extract the word context around a given term in the raw text
test_term <- quanteda::convert(corpus_de08, to = "data.frame")
unique(stringr::str_extract_all(test_term$text,
"\\b[:alnum:]*\\-?[:alnum:]*\\-?[:alnum:] deutschen \\-?[:alnum:]*\\-?[:alnum:]*\\b"))
# to find documents mentioning a given term
tt <- tx_de %>% filter(str_detect(text, "greensill"))
# bigram and trigram detect #####
bigrams_tx <- tx_it %>% filter(country == "Italy") %>% unnest_tokens(bigram, text, token = "ngrams", n = 2)
bigrams_tx %>% dplyr::count(bigram, sort = TRUE)
# stopwords_it / stopwords_de are assumed to be quanteda's standard stopword lists (the script uses them before any definition)
stopwords_it <- stopwords("it")
stopwords_de <- stopwords("de")
bigrams_separate <- bigrams_tx %>% separate(bigram, c("word1","word2"), sep = " ")
bigrams_filtered <- bigrams_separate %>%
filter(!word1 %in% stopwords_it) %>%
filter(!word2 %in% stopwords_it)
bigrams_filtered <- bigrams_filtered %>% dplyr::count(word1, word2, sort = TRUE)
bigrams_united <- bigrams_filtered %>% unite(bigram, word1, word2, sep = " ")
bigrams_united <- unique(bigrams_united)
bigrams_united <- bigrams_united %>% filter(! bigram %in% zombiefirm_pattern_it)
write.csv(bigrams_united,"bigrams_it2.csv",row.names= F)
trigrams_tx <- tx_de %>% filter(country == "Germany") %>% unnest_tokens(trigram, text, token = "ngrams", n = 3)
trigrams_tx %>% dplyr::count(trigram, sort = TRUE)
trigrams_separate <- trigrams_tx %>% separate(trigram,c("word1","word2","word3"),sep=" ")
trigrams_filtered <- trigrams_separate %>%
filter(!word1 %in% stopwords_de) %>%
filter(!word2 %in% stopwords_de) %>%
filter(!word3 %in% stopwords_de)
trigrams_filtered <- trigrams_filtered %>% dplyr::count(word1, word2,word3, sort = TRUE)
trigrams_united <- trigrams_filtered %>% unite(trigram, word1, word2,word3, sep = " ")
trigrams_united <- unique(trigrams_united)
# SEARCH STRATEGY ####
# German keywords: zombieunternehmen, zombiefirma, zombiefirmen, zombie firm, zombie firms, zombie-firma, zombie-firmen, zombie-unternehmen, zombie company, zombie companies, zombi-firma, zombi-firmen, zombi-unternehmen, industrie zombie, industrien zombie, industrie zoombie, industrien zoombie, industrie zombi, industrien zombi, industrie-zombie, industrien-zombie, industrie-zoombie, industrien-zoombie, industrie-zombi, industrien-zombi, industriezombie, industrienzombie, industriezoombie, industrienzoombie, industriezombi, industrienzombi, insolvenzantragspflicht
# Italian keywords: aziende zombie, azienda zombie, azienda zombi, aziende zombi, impresa zombie, impresa zombi, imprese zombie, impresa zombi, zombie company, zombie companies, zombi company, zombi companies, società zombie, oranizzazione zombie, organizzazione zombi, organizzazioni zombie, organizzazioni zombi, attività zombie, attività zoombie, attività zombi, attività-zombie, attività-zoombie, attività-zombi, attivitá zombie, attivitá zoombie, attivitá zombi, attivitá-zombie, attivitá-zoombie, attivitá-zombi, industrie decotte, industria decotta, azienda decotta, aziende decotte, compagnia decotta, compagnie decotte, società decotta, organizzazione decotta, organizzazioni decotte
# Source: Factiva database, personal newspaper subscriptions, and newspapers' trade agreements
# Screening of newspaper articles: articles must mention both zombie-firm terms and COVID terms. See the clean-out steps below for the exclusion criteria.
# Corpora with the saved documents cannot be shared because of legal agreements.
# The sections "Germany collection raw data" and "Italy collection raw data" document how the raw data were collected.
# Data analysis starts at the DATA ANALYSIS section. A sketch of the screening criterion follows below.
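# Illustrative sketch (not part of the original pipeline) of how the screening
# criterion could be expressed, assuming a data frame `tx` with a `text` column;
# the keyword vectors here are hypothetical subsets of the lists above.
zombie_terms <- c("zombieunternehmen", "zombiefirma", "zombie firm")
covid_terms <- c("covid", "corona", "pandemie")
screened <- tx %>%
  filter(str_detect(tolower(text), paste(zombie_terms, collapse = "|")),
         str_detect(tolower(text), paste(covid_terms, collapse = "|")))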
# Germany collection raw data (ignore) #####
# html from Factiva download
filenames <- list.files(pattern = "\\.html$", full.names = TRUE)
# read each file as a Factiva source; this creates a nested list
source_list <- lapply(filenames, FactivaSource)
# final Factiva parsing step for each document
raw_list <- lapply(source_list, Corpus, list(language = NA))
n <- length(raw_list) # number of elements in the nested list
# Combine all elements of the nested list into a single object (avoids writing out each element by hand)
corpus_vector <- do.call(c, raw_list)
# HERE the final CORPUS for Germany (comprising all documents) is assembled.
# normalize orthographic punctuation: replace straight and typographic apostrophes with spaces
corpus_vector <- tm_map(corpus_vector, content_transformer(function(x) str_replace_all(x, "['’‘]", " ")))
# corpus composition
corpus_de <- corpus(corpus_vector)
# Preparation for variables annotation
# variable: type of content
de_content_business <- c("Automobil Industrie Online" ,
"boerse-online.de" ,
"bondguide online" ,
"Börse Online" ,
"Börsen-Zeitung" ,
"Börsen Radio Network AG" ,
"Capital" ,
"Citywire" ,
"Czerwensky intern" ,
"DGAP Finanznachrichten" ,
"Die Bank" ,
"Dow Jones Newswires German" ,
"Euro am Sonntag" ,
"Fundamentalanalyse / Research" ,
"Gründerszene" ,
"manager magazin Online" ,
"Neumarkter Nachrichten" ,
"news aktuell OTS - Originaltextservice" ,
"Platow Brief" ,
"Unternehmeredition online" ,
"VerkehrsRundschau" ,
"Versicherungswirtschaft" ,
"WirtschaftsWoche Online" ,
"ZfK - Zeitung für kommunale Wirtschaft" ,
"ZfK - Zeitung für kommunale Wirtschaft Online" )
# variable: geographical cover
de_cover_local <- c(
"Berliner Zeitung" ,
"Bremer Nachrichten" ,
"Dresdner Neueste Nachrichten" ,
"Dresdner Neueste Nachrichten Online" ,
"Frankenpost" ,
"Frankfurter Neue Presse Online" ,
"General Anzeiger" ,
"Göttinger Tageblatt / Eichsfelder Tageblatt Online" ,
"Hamburger Abendblatt" ,
"Hamburger Abendblatt Online" ,
"Hamburger Morgenpost" ,
"Hannoversche Allgemeine Zeitung" ,
"Hannoversche Allgemeine Zeitung Online" ,
"Leipziger Volkszeitung" ,
"Leipziger Volkszeitung Online" ,
"Lübecker Nachrichten Online" ,
"Main-Spitze" ,
"Märkische Allgemeine Zeitung Online" ,
"Münchner Merkur" ,
"Neue Presse Online" ,
"Neue Westfälische" ,
"Nordbayerischer Kurier" ,
"Ostsee-Zeitung Online" ,
"Passauer Neue Presse" ,
"Peiner Allgemeine Zeitung Online" ,
"Rheinische Post Online" ,
"Schaumburger Nachrichten Online" ,
"Schwarzwälder Bote" ,
"Trierischer Volksfreund" ,
"Weser Kurier" ,
"Wolfsburger Allgemeine Online / Aller-Zeitung Online"
)
# variable: political alignment
de_rating_left <- c("DIE ZEIT" ,
"Frankfurter Rundschau" ,
"Süddeutsche Zeitung" ,
"Süddeutsche Zeitung Online" ,
"ZEIT online" )
de_rating_neutral <- c("Handelsblatt" ,
"Handelsblatt Online" ,
"Spiegel Online" ,
"Spiegel Plus" ,
"Berliner Zeitung" ,
"Bremer Nachrichten" ,
"Dresdner Neueste Nachrichten" ,
"Dresdner Neueste Nachrichten Online" ,
"Frankenpost" ,
"Frankfurter Neue Presse Online" ,
"General Anzeiger" ,
"Göttinger Tageblatt / Eichsfelder Tageblatt Online" ,
"Hamburger Abendblatt" ,
"Hamburger Abendblatt Online" ,
"Hamburger Morgenpost" ,
"Hannoversche Allgemeine Zeitung" ,
"Hannoversche Allgemeine Zeitung Online" ,
"Leipziger Volkszeitung" ,
"Leipziger Volkszeitung Online" ,
"Lübecker Nachrichten Online" ,
"Main-Spitze" ,
"Märkische Allgemeine Zeitung Online" ,
"Neue Presse Online" ,
"Neue Westfälische" ,
"Nordbayerischer Kurier" ,
"Ostsee-Zeitung Online" ,
"Passauer Neue Presse" ,
"Peiner Allgemeine Zeitung Online" ,
"Rheinische Post Online" ,
"Schaumburger Nachrichten Online" ,
"Schwarzwälder Bote" ,
"Trierischer Volksfreund" ,
"Weser Kurier" ,
"Wolfsburger Allgemeine Online / Aller-Zeitung Online" ,
"manager magazin Online" ,
"WirtschaftsWoche Online" ,
"Automobil Industrie Online" ,
"boerse-online.de" ,
"bondguide online" ,
"Börse Online" ,
"Börsen-Zeitung" ,
"Börsen Radio Network AG" ,
"Capital" ,
"Citywire" ,
"Czerwensky intern" ,
"DGAP Finanznachrichten" ,
"Die Bank" ,
"Dow Jones Newswires German" ,
"Euro am Sonntag" ,
"Fundamentalanalyse / Research" ,
"Gründerszene" ,
"Neumarkter Nachrichten" ,
"news aktuell OTS - Originaltextservice" ,
"Platow Brief" ,
"Unternehmeredition online" ,
"VerkehrsRundschau" ,
"Versicherungswirtschaft" ,
"ZfK - Zeitung für kommunale Wirtschaft" ,
"ZfK - Zeitung für kommunale Wirtschaft Online" ,
"Bundesanzeiger Jahresabschluss-Veröffentlichungen" ,
"Der Spiegel" ,
"Der Tagesspiegel" ,
"Der Tagesspiegel Online" ,
"Deutsche Welle" ,
"Express" ,
"Focus" ,
"Focus-Money" ,
"Focus Online" ,
"Reuters - Nachrichten auf Deutsch" )
de_rating_right <- c("Münchner Merkur" ,
"BILD Plus" ,
"bild.de" ,
"Die Welt" ,
"Welt am Sonntag" ,
"WELT online" )
# variable: means of diffusion
de_diffusion_online <- c("Wolfsburger Allgemeine Online / Aller-Zeitung Online" ,
"WirtschaftsWoche Online" ,
"Handelsblatt Online" ,
"Spiegel Online" ,
"ZEIT online" ,
"Dresdner Neueste Nachrichten Online" ,
"Hamburger Abendblatt Online" ,
"Hannoversche Allgemeine Zeitung Online" ,
"Leipziger Volkszeitung Online" ,
"Märkische Allgemeine Zeitung Online" ,
"Rheinische Post Online" ,
"Schaumburger Nachrichten Online" ,
"manager magazin Online" ,
"Automobil Industrie Online" ,
"boerse-online.de" ,
"bondguide online" ,
"Börse Online" ,
"Börsen-Zeitung" ,
"Börsen Radio Network AG" ,
"DGAP Finanznachrichten" ,
"Dow Jones Newswires German" ,
"Fundamentalanalyse / Research" ,
"Gründerszene" ,
"news aktuell OTS - Originaltextservice" ,
"Unternehmeredition online" ,
"ZfK - Zeitung für kommunale Wirtschaft Online" ,
"Der Spiegel" ,
"Der Tagesspiegel Online" ,
"Deutsche Welle" ,
"Focus Online" ,
"BILD Plus" ,
"bild.de" ,
"WELT online")
# variables annotation
corpus_de$content <- ifelse(docvars(corpus_de,"origin") %in% de_content_business, "business","general")
corpus_de$cover <- ifelse(docvars(corpus_de,"origin") %in% de_cover_local, "local","national")
corpus_de$diffusion <- ifelse(docvars(corpus_de, "origin") %in% de_diffusion_online, "online","paper")
corpus_de$rating <- ifelse(docvars(corpus_de,"origin") %in% de_rating_left,"left",
ifelse(docvars(corpus_de,"origin") %in% de_rating_right,"right","center"))
corpus_de$origin2 <- ifelse(docvars(corpus_de,"origin") %in% c("BILD Plus","bild.de"),"Bild",
ifelse(docvars(corpus_de,"origin") %in% c("Bremer Nachrichten","Weser Kurier"), "Weser Kurier",
ifelse(docvars(corpus_de,"origin") %in% c("Der Tagesspiegel Online","Der Tagesspiegel"), "Taggespiel" ,
ifelse(docvars(corpus_de,"origin") %in% c("DIE ZEIT","ZEIT online"), "Die Zeit",
ifelse(docvars(corpus_de,"origin") %in% c("Dresdner Neueste Nachrichten Online","Dresdner Neueste Nachrichten"), "Dresdner Neueste",
ifelse(docvars(corpus_de, "origin") %in% c("Focus Online","Focus-Money","Focus"), "Focus",
ifelse(docvars(corpus_de,"origin") %in% c("Hamburger Abendblatt Online","Hamburger Abendblatt"), "Hamburger Abendblatt",
ifelse(docvars(corpus_de,"origin") %in% c("Handelsblatt Online","Handelsblatt","WirtschaftsWoche Online"), "Handelsblatt" ,
ifelse(docvars(corpus_de,"origin") %in% c("Hannoversche Allgemeine Zeitung Online","Hannoversche Allgemeine Zeitung"), "HAZ",
ifelse(docvars(corpus_de,"origin") %in% c("Leipziger Volkszeitung Online","Leipziger Volkszeitung"), "Leipziger Volkszeitung" ,
ifelse(docvars(corpus_de,"origin") %in% c("Spiegel Online","Spiegel Plus"), "Spiegel",
ifelse(docvars(corpus_de,"origin") %in% c("Süddeutsche Zeitung Online","Süddeutsche Zeitung"), "Suddeutsche Zeitung",
ifelse(docvars(corpus_de,"origin") %in% c("Welt am Sonntag","WELT online","Die Welt"), "Die Welt",
ifelse(docvars(corpus_de,"origin") %in% c("ZfK - Zeitung für kommunale Wirtschaft Online","ZfK - Zeitung für kommunale Wirtschaft"), "Zfkw",
corpus_de$origin))))))))))))))
# papers from Frankfurter Allgemeine Zeitung to append
faz_source <- readtext(paste0(getwd(), "/*.pdf"),
docvarsfrom = "filenames",
docvarnames = c("datetimestamp", "origin2"),
sep = "_",
encoding = "UTF-8")
faz_corpus <- corpus(faz_source)
faz_corpus$rating <- "right"
faz_corpus$diffusion <- "online"
faz_corpus$content <- "general"
faz_corpus$cover <- "national"
faz_corpus$origin <- "FAZ.net"
# combine corpora
corpus_de <- (faz_corpus + corpus_de)
# variable date
corpus_de$datet <- as_date(corpus_de$datetimestamp)
corpus_de <- corpus_de[!duplicated(docvars(corpus_de)),] # delete duplicates
corpus_de08 <- corpus_de
save(corpus_de08,file = "corpus_de08full.Rdata")
# BIGRAM GERMANY
tx_de <- convert(corpus_de08, to = "data.frame")
bigrams_tx <- tx_de %>% unnest_tokens(bigram, text, token = "ngrams", n = 2)
bigrams_tx %>% dplyr::count(bigram, sort = TRUE)
bigrams_separate <- bigrams_tx %>% separate(bigram,c("word1","word2"),sep=" ")
bigrams_filtered <- bigrams_separate %>%
filter(!word1 %in% stopwords_de) %>%
filter(!word2 %in% stopwords_de)
bigrams_filtered <- bigrams_filtered %>% dplyr::count(word1, word2, sort = TRUE)
bigrams_united <- bigrams_filtered %>% unite(bigram, word1, word2, sep = " ")
# bigrams_united <- bigrams_united$bigram
bigrams_united <- unique(bigrams_united)
write.csv(bigrams_united,"bigramsdenew.csv",row.names= F)
trigrams_tx <- tx_de %>% unnest_tokens(trigram, text, token = "ngrams", n = 3)
trigrams_tx %>% dplyr::count(trigram, sort = TRUE)
trigrams_separate <- trigrams_tx %>% separate(trigram,c("word1","word2","word3"),sep=" ")
trigrams_filtered <- trigrams_separate %>%
filter(!word1 %in% stopwords_de) %>%
filter(!word2 %in% stopwords_de) %>%
filter(!word3 %in% stopwords_de)
trigrams_filtered <- trigrams_filtered %>% dplyr::count(word1, word2,word3, sort = TRUE)
trigrams_united <- trigrams_filtered %>% unite(trigram, word1, word2,word3, sep = " ")
# trigrams_united <- trigrams_united$trigram
trigrams_united <- unique(trigrams_united)
# Italy collection raw data (ignore) #####
# html from Factiva download
filenames <- list.files(pattern = "\\.html$", full.names = TRUE)
# read each file as a Factiva source; this creates a nested list
source_list <- lapply(filenames, FactivaSource)
# final Factiva parsing step for each document
raw_list <- lapply(source_list, Corpus, list(language = NA))
n <- length(raw_list) # number of elements in the nested list
# Combine all elements of the nested list into a single object (avoids writing out each element by hand)
corpus_vector <- do.call(c, raw_list)
# HERE the final CORPUS for Italy (comprising all documents) is assembled.
# normalize orthographic punctuation: replace straight and typographic apostrophes with spaces
corpus_vector <- tm_map(corpus_vector, content_transformer(function(x) str_replace_all(x, "['’‘]", " ")))
# corpus composition
corpus_it <- corpus(corpus_vector)
corpus_it <- corpus_it[!duplicated(docvars(corpus_it)),] # remove duplicates
# variable date
corpus_it$datet <- as_date(corpus_it$datetimestamp)
# Preparation for variables annotation
# variable: type of content
it_content_business <- c("Assinews" ,
"Bebeez.it" ,
"Bluerating Online" ,
"Il Sole 24 Ore-Online" ,
"Il Sole 24 Ore Digital Replica Edition of Print Edition" ,
"ItaliaOggi" ,
"ItaliaOggi7" ,
"La Legge Per Tutti" ,
"MF - Mercati Finanziari" ,
"Milano Finanza" ,
"Radiocor Italian Language Newswire" ,
"Industria Italiana")
# variable: geographical cover
it_cover_local <- c("Corriere Alto Adige" ,
"Corriere del Mezzogiorno" ,
"Corriere del Veneto" ,
"Corriere delle Alpi" ,
"Corriere di Bologna" ,
"Corriere Fiorentino" ,
"Gazzetta di Modena Online" ,
"Il Gazzettino" ,
"Il Gazzettino Online" ,
"Il Tirreno" ,
"L'Arena" ,
"La Gazzetta del Mezzogiorno" ,
"La Nuova Sardegna" ,
"La Provincia Pavese" ,
"La Repubblica Firenze" ,
"La Repubblica Milano" ,
"La Repubblica Torino" ,
"La Tribuna di Treviso" ,
"Messaggero Veneto" ,
"Messaggero Veneto Online")
# variable: political alignment
it_rating_left <- c("La Repubblica Firenze" ,
"La Repubblica Milano" ,
"La Repubblica Torino" ,
"Avvenire" ,
"Avvenire Online" ,
"Il Fatto Quotidiano" ,
"Il Fatto Quotidiano Online" ,
"La Repubblica" ,
"La Repubblica.it")
it_rating_neutral <- c("Corriere delle Alpi" ,
"Gazzetta di Modena Online" ,
"Il Gazzettino" ,
"Il Gazzettino Online" ,
"Il Tirreno" ,
"L'Arena" ,
"La Gazzetta del Mezzogiorno" ,
"La Nuova Sardegna" ,
"La Provincia Pavese" ,
"La Tribuna di Treviso" ,
"Messaggero Veneto" ,
"Messaggero Veneto Online" ,
"Assinews" ,
"Bebeez.it" ,
"Bluerating Online" ,
"Il Sole 24 Ore-Online" ,
"Il Sole 24 Ore Digital Replica Edition of Print Edition" ,
"ItaliaOggi" ,
"ItaliaOggi7" ,
"La Legge Per Tutti" ,
"MF - Mercati Finanziari" ,
"Milano Finanza" ,
"Radiocor Italian Language Newswire" ,
"Industria Italiana" ,
"24Ovest.it" ,
"Adnkronos - General News" ,
"Adnkronos - Labor News" ,
"Agenparl" ,
"Agenzia Giornalistica Italia" ,
"ANSA - Economic and Financial Service" ,
"ANSA - Political and Economic News Service" ,
"ANSA - Regional Service" ,
"Askanews" ,
"Citynews Italy" ,
"HuffPost Italia" ,
"Il Piccolo" ,
"Il Piccolo Online" ,
"Il Resto del Carlino" ,
"L'Espresso" ,
"La Nazione" ,
"La Stampa" ,
"Reuters - Notizie in Italiano")
it_rating_right <- c("Corriere Alto Adige" ,
"Corriere del Mezzogiorno" ,
"Corriere del Veneto" ,
"Corriere di Bologna" ,
"Corriere Fiorentino" ,
"Corriere della Sera" ,
"Corriere della Sera Magazines and Supplements" ,
"Corriere della Sera Online" ,
"Il Giornale" ,
"Il Giorno" ,
"Il Messaggero" ,
"Il Messaggero Online")
# variable: means of diffusion
it_diffusion_online <- c("Adnkronos - General News" ,
"Avvenire Online" ,
"Il Fatto Quotidiano Online" ,
"La Repubblica.it" ,
"Il Gazzettino Online" ,
"Messaggero Veneto Online" ,
"Bebeez.it" ,
"Il Sole 24 Ore-Online" ,
"La Legge Per Tutti" ,
"24Ovest.it" ,
"Adnkronos - Labor News" ,
"Agenparl" ,
"Askanews" ,
"HuffPost Italia" ,
"Il Piccolo Online" ,
"Corriere della Sera Online" ,
"Il Messaggero Online")
# variables annotation
corpus_it$content <- ifelse(docvars(corpus_it, "origin") %in% it_content_business,
"business", "general")
corpus_it$cover <- ifelse(docvars(corpus_it, "origin") %in% it_cover_local,"local","national")
corpus_it$diffusion <- ifelse(docvars(corpus_it,"origin") %in% it_diffusion_online, "online","paper")
corpus_it$rating <- ifelse(docvars(corpus_it,"origin") %in% it_rating_left,"left",
ifelse(docvars(corpus_it,"origin") %in% it_rating_right,"right","neutral") )
corpus_it$origin2 <- ifelse(docvars(corpus_it,"origin") %in% c("Adnkronos - General News","Adnkronos - Labor News"), "adnkronos",
ifelse(docvars(corpus_it,"origin") %in% c("ANSA - Economic and Financial Service","ANSA - Political and Economic News Service","ANSA - Regional Service"), "Ansa",
ifelse(docvars(corpus_it,"origin") %in% c("Avvenire Online","Avvenire"), "Avvenire",
ifelse(docvars(corpus_it,"origin") %in% c("Corriere Alto Adige","Corriere del Mezzogiorno","Corriere del Veneto","Corriere della Sera Magazines and Supplements","Corriere della Sera Online","Corriere della Sera","Corriere di Bologna","Corriere Fiorentino"), "Corriere Sera",
ifelse(docvars(corpus_it,"origin") %in% c("Il Fatto Quotidiano Online","Il Fatto Quotidiano"), "Fatto Quotidiano",
ifelse(docvars(corpus_it,"origin") %in% c("Il Gazzettino Online","Il Gazzettino"), "Il Gazzettino",
ifelse(docvars(corpus_it,"origin") %in% c("Il Messaggero Online","Il Messaggero"), "Il Messaggero",
ifelse(docvars(corpus_it,"origin") %in% c("Il Piccolo Online","Il Piccolo"), "Il Piccolo",
ifelse(docvars(corpus_it,"origin") %in% c("Il Sole 24 Ore Digital Replica Edition of Print Edition","Il Sole 24 Ore-Online","Radiocor Italian Language Newswire"), "Sole 24 ore",
ifelse(docvars(corpus_it,"origin") %in% c("ItaliaOggi","ItaliaOggi7"), "ItaliaOggi",
ifelse(docvars(corpus_it,"origin") %in% c("La Repubblica Firenze","La Repubblica Milano","La Repubblica Torino","La Repubblica.it","La Repubblica"), "La Repubblica",
ifelse(docvars(corpus_it,"origin") %in% c("Messaggero Veneto Online","Messaggero Veneto"), "Messaggero Veneto",
ifelse(docvars(corpus_it,"origin") %in% c("MF - Mercati Finanziari","Milano Finanza"), "Milano Finanza",
corpus_it$origin)))))))))))))
corpus_it08 <- corpus_it
save(corpus_it08,file = "corpus_it08orig.Rdata")
# DATA ANALYSIS ####
## UPLOAD GERMAN CORPUS to use for all analyses of Germany #####
# upload for either keyness corpus-level & co-occurrences networks or keyness sentence-level
load("corpus_de08full.Rdata")
corpus_de08 <- corpus_de08[!duplicated(docvars(corpus_de08)),] # delete duplicates
corpus_de08$country <- "Germany"
corpus_de08$covidtp[corpus_de08$datet < "2020-01-01"] <- "before 2020-01-01"
corpus_de08$covidtp[corpus_de08$datet >= "2020-01-01"] <- "after 2020-01-01"
corpus_de08$abbrev[corpus_de08$origin2 == "Handelsblatt"] <- "HB"
corpus_de08$abbrev[corpus_de08$origin2 == "Suddeutsche Zeitung"] <- "SZ"
corpus_de08$abbrev[corpus_de08$origin2 == "Die Welt"] <- "WT"
corpus_de08$abbrev[corpus_de08$origin2 == "Die Zeit"] <- "ZT"
corpus_de08$abbrev[corpus_de08$origin2 == "Spiegel"] <- "SP"
corpus_de08$abbrev[corpus_de08$origin2 == "Bild"] <- "BL"
corpus_de08$abbrev[corpus_de08$origin2 == "FAZ"] <- "FZ"
corpus_de08$rating[corpus_de08$origin2 == "FAZ"] <- "right"
# names to texts
docnames(corpus_de08) <- paste(corpus_de08$abbrev, corpus_de08$id, sep="_")
# selection of newspapers of interest
corpus_de08 <- corpus_subset(corpus_de08,
origin2 == "Suddeutsche Zeitung" | origin2 == "Die Welt" |
origin2 == "Die Zeit" | origin2 == "FAZ")
# filter time >= 2020-01-01
corpus_de08 <- corpus_subset(corpus_de08,datet >= "2020-01-01")
# clean out texts (outliers, out-of-context items, etc.; exclusion reasons commented inline)
corpus_de08 <- corpus_subset(corpus_de08, !docnames(corpus_de08) %in% c(
# near-duplicate documents
"WT_WELTON0020201129egbt0005v","WT_WELTON0020201202egc20005p","WT_WELTON0020201215egce000mk",
"WT_DWELT00020210112eh1c0000p","SZ_SDDZ000020210301eh310001w",
"WT_WELTON0020210412eh4c000h9","ZT_DIEZEI0020220113ec8b002hj","ZT_DIEZEI0020220113ed1q005j1",
"WT_WELTON0020180712ee7c000pa","WT_WELTON0020190705ef75000p8",
"WT_WELTON0020190907ef97000bu","WT_DWELT00020200512eg5c0000h","WT_DWELT00020200618eg6i0000u",
"WT_DWELT00020200824eg8o0000v","SZ_SDDZ000020200908eg980002f",
"FZ_SD1202003155958457",
"FZ_FD1202003195963222",
"FZ_FD2202008146064354",
"FZ_SD1202008236066007","FZ_FD2202008266072045","FZ_FDA202009016076044","FZ_FD1202009036077473",
"FZ_FZ_20200904", "FZ_FD1202009126082842","FZ_FD2202009166086472","FZ_FD1202009176087739","FZ_FZ_20200918",
"FZ_FD2202010066098334","FZ_FZ_20201019", "FZ_FD1202011096122521", "FZ_SD1202011296136764","FZ_FZ_20201224",
"FZ_FD1202104276234436","FZ_FD1202108025000526464873", #
# outlier length: too short
"WT_WSONNT0020200816eg8g0001g",
"SZ_SDDZ000020210315eh3f0000c",
# piece of information
"WT_DWELT00020201124egbo0000t",
"WT_WSONNT0020211009eha90000w",
"WT_DWELT00020200512eg5c00014",
"ZT_ZEITON0020200810eg8a0002t",
"ZT_ZEITON0020200919eg9j0002v",
"FZ_FD2202009046078725", # trend in bankruptcy
# out of context
"ZT_DIEZEI0020200827eg8r0000t", # shortening work hours,
"WT_WELTON0020201214egce000gb", # interview with banker on general finance state, not limited covid
"ZT_DIEZEI0020201119egbj0000m", # story-like dossier on receiving credit loan
"FZ_FD1202007116039759", # interview to min. economy Peter Altmaier at time, out of context
"WT_WELTON0020200427eg4q00005" , # history out of context
"WT_WELTON0020200712eg7c0005l", # abroad enterprises
"ZT_DIEZEI0020210805eh850000v", # competition automotive
"WT_WELTON0020201228egcs000bz" , # meaningless DAX
"SZ_SDDZ000020170714ed7e0001x" , # menaingless music
"WT_WELTON0020211119ehbj00088" , # meaningless flies
"SZ_SDDZ000020190311ef3b0001p" , # meaningless movie
"ZT_DIEZEI0020130620e96k00029" , # meaningless movie
"SZ_SUDZEIT020141117eabe0001f" , # meaningless movie
"SZ_SDDZ000020161121ecbl0001z" , # meaningless movie
"SZ_SDDZ000020190311ef3b0001p" , # meaningless movie
"ZT_DIEZEI0020110922e79m0002v" , # meaningless movie
"SZ_SDDZ000020170120ed1k0002b" , # meaningless movie
"SZ_SUDZEIT020170713ed7d0002u" , # U2 concerts
"SZ_SDDZ000020181112eebc0001r", # music album
"SZ_SDDZ000020140818ea8i000gw", # essay economics
"SZ_SDDZ000020201231egcv0001k", # personal diary
"SZ_SDDZ000020200228eg2s0002t", # China
"WT_WSONNT0020200322eg3m00054" # lifestyle
))
# Germany: corpus preparation for KEYNESS CORPUS-LEVEL AND CO-OCCURRENCES NETWORKS analysis ####
corpus_de08 <- tolower(corpus_de08)
# normalize apostrophes and quotation marks, then collapse whitespace
corpus_de08 <- gsub("['’‘«»„“\"]", " ", corpus_de08)
corpus_de08 <- gsub("[\n\t]", " ", corpus_de08)
corpus_de08 <- gsub("\\s+", " ", corpus_de08)
# corpus converted to a data frame for text-level processing
tx_de <- convert(corpus_de08, to = "data.frame")
bg_de <- read_xls("zombiefirms.xls",sheet = "de_bg")[,1]
bg_denot <- read_xls("zombiefirms.xls",sheet = "de_bgnot")[,1]
bg_de <- bg_de$bigram[! bg_de$bigram %in% bg_denot$de_bgnot]
# lemmatization of compounds
zombiefirm_pattern_de <- read_xls("zombiefirms.xls",sheet = "de_lemma")[,1] # non-lemmatized terms
zombiefirm_pattern_de <- paste0("\\b",zombiefirm_pattern_de$keyword_de,"\\b") # wrap in word boundaries
zombiefirm_replace_de <- read_xls("zombiefirms.xls",sheet = "de_lemma")[,2] # lemmatized versions
names(zombiefirm_replace_de$lemma_dezmb) <- zombiefirm_pattern_de # named vector: pattern -> replacement
tx_de$text <- str_replace_all(tx_de$text, zombiefirm_replace_de$lemma_dezmb )
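# How the named-vector replacement works (illustrative, not part of the pipeline):
# names are regex patterns, values are replacements, applied in a single pass.
# A toy example with made-up terms:
str_replace_all("die zombie firmen und die zombie-unternehmen",
                c("\\bzombie firmen\\b" = "zombieunternehmen",
                  "\\bzombie-unternehmen\\b" = "zombieunternehmen"))
# returns "die zombieunternehmen und die zombieunternehmen"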
# removal list
zombiefirm_removepat <- read_xls("zombiefirms.xls",sheet = "de_rem")[,1] # non-lemmatized terms to remove
zombiefirm_removepat <- paste0("\\b",zombiefirm_removepat$rem_de,"\\b") # wrap in word boundaries
zombiefirm_removerpl <- read_xls("zombiefirms.xls",sheet = "de_rem")[,2] # replacement tokens for removal
names(zombiefirm_removerpl$rem_de_replace) <- zombiefirm_removepat # named vector: pattern -> replacement
tx_de$text <- str_replace_all(tx_de$text, zombiefirm_removerpl$rem_de_replace)
rem_de <- read_xls("zombiefirms.xls",sheet = "de_rem")[,2] # remove list
rem_de <- unique(rem_de$rem_de_replace)
corpus_de08 <- corpus(tx_de)
# document-term matrix
dfm_de <- tokens( corpus_de08,
remove_punct = TRUE,
remove_symbols = TRUE,
remove_separators = TRUE,
remove_numbers = TRUE,
remove_url = FALSE
) %>%
tokens_tolower() %>%
tokens_remove(c(stopwords("de"),get_stopwords(language = "de") ,rem_de)) %>% # , rem_dekey)) %>%
dfm()
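# Quick sanity check (illustrative, not part of the original script): inspect the
# most frequent features of the preprocessed matrix to verify the stopword and removal lists.
textstat_frequency(dfm_de, n = 20)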
## UPLOAD ITALIAN CORPUS to use for all analyses of Italy #####
# upload for either keyness corpus-level & co-occurrences networks or keyness sentence-level
load("corpus_it08orig.Rdata")
corpus_it08 <- corpus_it08[!duplicated(docvars(corpus_it08)),] # delete duplicates
corpus_it08$country <- "Italy"
corpus_it08$covidtp[corpus_it08$datet < "2020-01-01"] <- "before 2020-01-01"
corpus_it08$covidtp[corpus_it08$datet >= "2020-01-01"] <- "after 2020-01-01"
corpus_it08$abbrev[corpus_it08$origin2 == "La Repubblica"] <- "RP"
corpus_it08$abbrev[corpus_it08$origin2 == "Corriere Sera"] <- "CS"
corpus_it08$abbrev[corpus_it08$origin2 == "Il Giornale"] <- "GN"
corpus_it08$abbrev[corpus_it08$origin2 == "Sole 24 ore"] <- "S24"
corpus_it08$abbrev[corpus_it08$origin2 == "Fatto Quotidiano"] <- "FQ"
# names to texts
docnames(corpus_it08) <- paste(corpus_it08$abbrev, corpus_it08$id, sep="_")
# selection of newspapers of interest
corpus_it08 <- corpus_subset(corpus_it08,origin2 == "La Repubblica"|origin2 == "Corriere Sera"|origin2 == "Il Giornale"|origin2 == "Fatto Quotidiano")
# filter time >= 2020-01-01
corpus_it08 <- corpus_subset(corpus_it08,datet >= "2020-01-01")
# clean out texts (outliers, out-of-context items, etc.; exclusion reasons commented inline)
corpus_it08 <- corpus_subset(corpus_it08, !docnames( corpus_it08) %in% c(
# near-duplicate documents
"GN_GIONLE0020130426e94q00082","CS_CORDES0020171218edci0000g","CS_CORSUP0020200706eg760000o",
"CS_CORDES0020210201eh210007q",
"CS_CORSUP0020200406eg4600017","CS_CORONL0020200503eg53000br",
#
"CS_CORDES0020200629eg6t00017",
"CS_CORDES0020201102egb20001h",
"CS_CORSUP0020210419eh4j0000w",
# outlier length: too short
"RP_LAREP00020200720eg7k0003c",
"RP_LAREP00020210203eh230002c",
# only piece of information
"RP_LAREP00020201217egch0002h",
"CS_CORDES0020201222egcm0002s",
"GN_GIONLE0020210216eh2g0001l",
"CS_CORDES0020210320eh3k0006t",
"RP_LAREP00020210412eh4c0000i",
# out of context
"GN_GIONLE0020211003eha300010", # just mention
"CS_CORVEN0020210209eh290000k", # summary
"FQ_FATONL0020210310eh3a000bs", # summary
"RP_REPONL0020211024ehao0028w", # not content
"RP_REPONL0020191104efb4000ma" , # date 2019
"CS_CORVEN0020210313eh3d00002", # about vaccine
"CS_CORDES0020141219eacj00063", # about actress Gwyne Paltrow
"FQ_FATONL0020210212eh2c0008w", # about Berlusconi, mention like joke (Berlusconi zombie company)
"CS_CORDES0020160118ec1i00072", # about enterpreneur Calabrò, just mention
"FQ_FATQUO0020150918eb9h0000m", # American federal bank, mention to Europe,
"FQ_FATONL0020200115eg1f000b6", # Camorra case,
"CS_CORONL0020200131eg1v000xf", # Jacobini family Puglia,
"CS_CORDES0020200217eg2h0002o", # real madrid
"CS_CORONL0020200131eg1v00107",# Jacobini family Puglia,
"CS_CORDES0020200201eg2100015", # Jacobini family Puglia,
"RP_REPBAR0020200201eg210000k", # Jacobini family Puglia court,
"RP_LAREP00020200506eg5600001", # Germany Karlsruhe decision vs state bond selling by EU
"CS_CORONL0020200507eg570008e", # Germany kurzarbeit explanation
"FQ_FATQUO0020200508eg570000n", # Personal reply
"GN_GIONLE0020200514eg5e0003v", # not relevant, personal info
"RP_LAREP00020200613eg6d00047", # cina
"CS_CORDES0020200615eg6f0004u", # short info
"CS_CORSUP0020200615eg6f0000m", # short info
"RP_LAREP00020200622eg6m0000j", # 70's, not in relation to covid
"CS_CORONL0020200719eg7j0005l", # Lega specific courtship case
"CS_CORDES0020200724eg7o0007l", # Corneliani firm, out of covid
"RP_REPTOR0020200803eg8300008", # courthsip, no covid
"RP_LAREP00020200913eg9d0001s", # Milan singular court case
"CS_CORONL0020200922eg9m000by", # mention in Brianza antigen
"FQ_FATONL0020200923eg9n0008u", # mention in Brianza antigen
"FQ_FATQUO0020200928eg9r0000h", # no Italy
"RP_LAREP00020201019egaj0001p", # Cina
"RP_LAREP00020201204egc400012", # Lega
"CS_CORDES0020210116eh1g00083", # personal story
"CS_CORSUP0020200629eg6t0000n", # out of context, general comment industry
"CS_CORONL0020201031egav0008n", # summary of issue, no content
"CS_CORSUP0020201109egb90000y", # EU level
"GN_GIONLE0020201211egcb00014", # ilva no covid
"RP_REPNAP0020210117eh1h0000h", # napoli no covid
"FQ_FATONL0020210129eh1s00008", # usa gamestop no covid
"FQ_FATQUO0020210130eh1t0000r", # usa gamestop no covid, reflection ita
"RP_REPTOR0020210218eh2i0000n", # individual court case, no covid
"RP_REPTOR0020210218eh2i0000s", # individual court case, no covid
"CS_CORONL0020210418eh4i001bi", # individual story, no covid
"CS_CORDES0020210618eh6i00075", # small piece info
"CS_CORDES0020210903eh930001t", # snapshot interview no covid
"GN_GIONLE0020211016ehag0003i", # small input info, within series snapshots
"RP_REPGEN0020211208ehc800002", # Ferrero footbal manager case, no covid
"CS_CORONL0020211208ehc80006r", # Ferrero footbal manager case, no covid
"RP_REPGEN0020211211ehcb00005", # Ferrero footbal manager case, no covid
"RP_REPGEN0020211211ehcb0002b", # Ferrero footbal manager case, no covid
"RP_REPGEN0020211218ehci0000g", # Ferrero footbal manager case, no covid
"RP_REPGEN0020220113ei1d00004", # court case, no covid
"CS_CORDES0020210209eh290004j", # piece of news
"CS_CORSUP0020210713eh7d0000v", # about cooperatives, not politics involvement
"GN_GIONLE0020210302eh320002j", # out of context,
"GN_GIONLE0020210501eh5100003", # short, court case
"CS_CORONL0020211103ehb30015p", # just mentioned, broad comment on Draghi
"CS_CORONL0020200706eg76000en" # startup Intesa San Paolo
))
# Italy: corpus preparation for KEYNESS CORPUS-LEVEL AND CO-OCCURRENCES NETWORKS analysis ####
corpus_it08 <- tolower(corpus_it08)
# normalize apostrophes and quotation marks, then collapse whitespace
corpus_it08 <- gsub("['’‘«»„“\"]", " ", corpus_it08)
corpus_it08 <- gsub("[\n\t]", " ", corpus_it08)
corpus_it08 <- gsub("\\s+", " ", corpus_it08)
# corpus converted to a data frame for text-level processing
tx_it <- convert(corpus_it08, to = "data.frame")
bg_it <- read_xls("zombiefirms.xls",sheet = "it_bg")[,1]
bg_itnot <- read_xls("zombiefirms.xls",sheet = "it_bgnot")[,1]
bg_it <- bg_it$bigram[! bg_it$bigram %in% bg_itnot$it_bgnot]
zombiefirm_pattern_it <- read_xls("zombiefirms.xls",sheet = "it_lemma")[,1] # non-lemmatized terms
zombiefirm_pattern_it <- paste0("\\b",zombiefirm_pattern_it$keyword_it,"\\b") # wrap in word boundaries
zombiefirm_replace_it <- read_xls("zombiefirms.xls",sheet = "it_lemma")[,2] # lemmatized versions
names(zombiefirm_replace_it$lemma_itzmb) <- zombiefirm_pattern_it # named vector: pattern -> replacement
tx_it$text <- str_replace_all(tx_it$text, zombiefirm_replace_it$lemma_itzmb )
# removal list
zombiefirm_removepat_it <- read_xls("zombiefirms.xls",sheet = "it_rem")[,1]
zombiefirm_removepat_it <- paste0("\\b",zombiefirm_removepat_it$rem_it,"\\b")
zombiefirm_removerpl_it <- read_xls("zombiefirms.xls",sheet = "it_rem")[,2]
names(zombiefirm_removerpl_it$rem_it_replace) <- zombiefirm_removepat_it
tx_it$text <- str_replace_all(tx_it$text,zombiefirm_removerpl_it$rem_it_replace)
rem_it <- read_xls("zombiefirms.xls",sheet = "it_rem")[,2]
rem_it <- unique(rem_it$rem_it_replace)
corpus_it08 <- corpus(tx_it)
# document-term matrix
dfm_it <- tokens( corpus_it08,
remove_punct = TRUE,
remove_symbols = TRUE,
remove_separators = TRUE,
remove_numbers = TRUE,
remove_url = FALSE
) %>%
tokens_tolower() %>%
tokens_remove(c(stopwords("it"), get_stopwords(language = "it"),rem_it)) %>%
dfm()
# KEYNESS CORPUS-LEVEL ####
# Keyness was computed separately on the Italian and German corpora, then combined for visualization
# German keyness
kn_de <- textstat_keyness(dfm_group(dfm_subset(dfm_de, datet >= "2020-01-01" ),groups = rating),
target = "right")
kn_de$country <- "Germany"
# Italian keyness
kn_it <- textstat_keyness(dfm_group(dfm_subset(dfm_it, datet >= "2020-01-01" ),groups = rating),
target = "right")
kn_it$country <- "Italy"
# Combining keyness from the two samples
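# textstat_keyness() returns features sorted by chi2 in decreasing order, so the head rows are the
# features most associated with the target ("right") and the tail rows those most associated with the
# reference ("left"); the numeric row indices below are specific to these corpora.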
kn_defig <- kn_de[c(1:20,11865:11845),] %>% mutate(feature = reorder(feature, chi2)) %>%
mutate(refgrp = ifelse(chi2 < 0, "Left","Right"))
kn_itfig <- kn_it[c(1:20,12119:12139),] %>% mutate(feature = reorder(feature, chi2)) %>%
mutate(refgrp = ifelse(chi2 < 0, "Left","Right"))
rbind(kn_defig,kn_itfig) %>%
ggplot(aes(x = chi2, y = feature, fill = refgrp)) + geom_col() +
scale_fill_manual(values = c("Left" = "grey","Right" = "black"),
name = "Political Leaning") +
xlab(expression(chi^2)) +
facet_wrap(~ country, scales = "free") +
theme_bw() +
theme(axis.title.y = element_blank(), legend.position = "bottom")
ggsave(filename = "images/fig1.jpg", width = 4.5, height = 5) # 6 5
# Germany: CO-OCCURRENCES NETWORKS ####
# Co-occurrence networks were produced separately for each country with the following code. Figures were combined with an external editor.
# Left Germany
fcm_lf <- tokens(corpus_subset(corpus_de08,datet >= "2020-01-01" & rating == "left"),
remove_punct = TRUE, remove_symbols = TRUE, remove_separators = TRUE,
remove_numbers = TRUE,remove_url = FALSE) %>%
tokens_tolower() %>%
tokens_remove(c(stopwords("de"),get_stopwords(language = "de"),rem_de,