Combine CAM files, clean data

Author

Julius Fenn, Lars Kreuznacht

Notes

Remark:

## global variables
save_CAMs_as_pictures = FALSE

prepare raw data

load packages, raw data, functions

# sets the directory of location of this script as the current directory
# setwd(dirname(rstudioapi::getSourceEditorContext()$path))

########################################
# load packages
########################################
require(pacman)
p_load('tidyverse', 'jsonlite', 'magrittr', 'xlsx',
       'stargazer', 'psych', 'jtools', 'DT', 'ggstatsplot', 
       'lavaan', 
       'regsem', 'MplusAutomation', 'igraph',
       'vroom')


########################################
# load data files
########################################
setwd("data")
# dir()


### load CAM files
# individual
suppressMessages(read_file("cam_ind.txt") %>%
  # ... split it into lines ...
  str_split('\n') %>% first() %>%
    discard(function(x) x == '') %>%
    discard(function(x) x == '\r') %>%
  # ... filter empty rows ...
  discard(function(x) x == '')) -> dat_CAM_individual

raw_CAM_individual <- list()
for(i in 1:length(dat_CAM_individual)){
  raw_CAM_individual[[i]] <- jsonlite::fromJSON(txt = dat_CAM_individual[[i]])
}
# rm(dat_CAM_individual)


# group
suppressMessages(read_file("cam_group.txt") %>%
  # ... split it into lines ...
  str_split('\n') %>% first() %>%
    discard(function(x) x == '') %>%
    discard(function(x) x == '\r') %>%
  # ... filter empty rows ...
  discard(function(x) x == '')) -> dat_CAM_group

raw_CAM_group <- list()
for(i in 1:length(dat_CAM_group)){
  raw_CAM_group[[i]] <- jsonlite::fromJSON(txt = dat_CAM_group[[i]])
}
# rm(dat_CAM_group)
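
Both parsing loops assume that every non-empty line is valid JSON. As an optional, more defensive variant (a sketch only; the helper parse_CAM_lines is hypothetical and not part of the CAM-App functions), malformed lines could be reported instead of aborting the loop:

## optional sketch: defensive line-by-line parsing (hypothetical helper, not used below)
parse_CAM_lines <- function(lines){
  parsed <- lapply(seq_along(lines), function(i){
    tryCatch(jsonlite::fromJSON(txt = lines[[i]]),
             error = function(e){
               warning("line ", i, " could not be parsed: ", conditionMessage(e))
               NULL
             })
  })
  Filter(Negate(is.null), parsed)
}
# raw_CAM_individual <- parse_CAM_lines(dat_CAM_individual)
# raw_CAM_group      <- parse_CAM_lines(dat_CAM_group)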


## load SoSci Survey data (raw export)
dat_SosciSurvey <- read.delim(
  file= "rdata_test399712_2024-04-22_16-00.csv", encoding="UTF-8", fileEncoding="UTF-8",
  header = FALSE, sep = "\t", quote = "\"",
  dec = ".", row.names = NULL,
  col.names = c(
    "CASE","SERIAL","REF","QUESTNNR","MODE","STARTED","AB01","BD01_01","BD02",
    "BD04","BD04_02","BD06","BD07","GE01_01","GE01_02","GE02","GP_01","GP_02",
    "GP_03","GP_04","GP_05","GP_06","GP_07","GP_08","GP_09","GP_10","GP_11","GP_12",
    "GP_13","GP_14","GP_15","GP_16","IC01","PC01_05","PC01_08","PC01_09","PC01_12",
    "PC01_06","PC01_11","PC01_04","PC01_03","PC06_01","PC06_02","PC02_01","PC02_02",
    "PC02_03","PC02_08","PC02_04","PC02_05","PC02_06","PC02_07","PC03_01","PC03_02",
    "PC03_04","PC03_06","PC03_07","PC03_08","PC03_09","PC03_10","PC03_11","PC0_GN",
    "PO01","PO01_07","SO01_01","TE01_RV1","TE02","TE02_02","TIME001","TIME002",
    "TIME003","TIME004","TIME005","TIME006","TIME007","TIME008","TIME009","TIME010",
    "TIME011","TIME012","TIME013","TIME014","TIME015","TIME016","TIME017","TIME018",
    "TIME019","TIME020","TIME021","TIME022","TIME023","TIME024","TIME025","TIME026",
    "TIME027","TIME028","TIME029","TIME_SUM","MAILSENT","LASTDATA","FINISHED",
    "Q_VIEWER","LASTPAGE","MAXPAGE","MISSING","MISSREL","TIME_RSI"
  ),
  as.is = TRUE,
  colClasses = c(
    CASE="numeric", SERIAL="character", REF="character", QUESTNNR="character",
    MODE="factor", STARTED="POSIXct", AB01="numeric", BD01_01="numeric",
    BD02="numeric", BD04="numeric", BD04_02="character", BD06="numeric",
    BD07="numeric", GE01_01="numeric", GE01_02="numeric", GE02="numeric",
    GP_01="numeric", GP_02="numeric", GP_03="numeric", GP_04="numeric",
    GP_05="numeric", GP_06="numeric", GP_07="numeric", GP_08="numeric",
    GP_09="numeric", GP_10="numeric", GP_11="numeric", GP_12="numeric",
    GP_13="numeric", GP_14="numeric", GP_15="numeric", GP_16="numeric",
    IC01="numeric", PC01_05="numeric", PC01_08="numeric", PC01_09="numeric",
    PC01_12="numeric", PC01_06="numeric", PC01_11="numeric", PC01_04="numeric",
    PC01_03="numeric", PC06_01="numeric", PC06_02="numeric", PC02_01="numeric",
    PC02_02="numeric", PC02_03="numeric", PC02_08="numeric", PC02_04="numeric",
    PC02_05="numeric", PC02_06="numeric", PC02_07="numeric", PC03_01="numeric",
    PC03_02="numeric", PC03_04="numeric", PC03_06="numeric", PC03_07="numeric",
    PC03_08="numeric", PC03_09="numeric", PC03_10="numeric", PC03_11="numeric",
    PC0_GN="numeric", PO01="numeric", PO01_07="character", SO01_01="character",
    TE01_RV1="character", TE02="numeric", TE02_02="character",
    TIME001="integer", TIME002="integer", TIME003="integer", TIME004="integer",
    TIME005="integer", TIME006="integer", TIME007="integer", TIME008="integer",
    TIME009="integer", TIME010="integer", TIME011="integer", TIME012="integer",
    TIME013="integer", TIME014="integer", TIME015="integer", TIME016="integer",
    TIME017="integer", TIME018="integer", TIME019="integer", TIME020="integer",
    TIME021="integer", TIME022="integer", TIME023="integer", TIME024="integer",
    TIME025="integer", TIME026="integer", TIME027="integer", TIME028="integer",
    TIME029="integer", TIME_SUM="integer", MAILSENT="POSIXct",
    LASTDATA="POSIXct", FINISHED="logical", Q_VIEWER="logical",
    LASTPAGE="numeric", MAXPAGE="numeric", MISSING="numeric", MISSREL="numeric",
    TIME_RSI="numeric"
  ),
  skip = 1,
  check.names = TRUE, fill = TRUE,
  strip.white = FALSE, blank.lines.skip = TRUE,
  comment.char = "",
  na.strings = ""
)
########################################
# load functions
########################################
# print(getwd())
setwd("../functions")
for(i in 1:length(dir())){
  # print(dir()[i])
  source(dir()[i], encoding = "utf-8")
}


setwd("../functions_CAMapp")
for(i in 1:length(dir())){
  # print(dir()[i])
  source(dir()[i], encoding = "utf-8")
}
rm(i)
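
Note that source() is applied to every file returned by dir(), so any non-R file placed in these folders would break the loop. A slightly more defensive variant (a sketch; same folders as above) restricts sourcing to .R files:

## optional sketch: source only files ending in .R / .r
# for(f in list.files(path = ".", pattern = "\\.[rR]$", full.names = TRUE)){
#   source(f, encoding = "utf-8")
# }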

set up CAM data

check for CAM IDs

all CAMs have unique IDs

# group CAMs
for(i in 1:length(raw_CAM_group)){
  print(raw_CAM_group[[i]]$creator)
}
[1] "700"
[1] "672"
[1] "750"
[1] "708"
[1] "759"
[1] "690"
[1] "663"
[1] "733"
[1] "726"
[1] "742"
[1] "657"
# individual CAMs
for(i in 1:length(raw_CAM_individual)){
  print(raw_CAM_individual[[i]]$creator)
}
[1] "674"
[1] "671"
[1] "673"
[1] "736"
[1] "724"
[1] "732"
[1] "727"
[1] "734"
[1] "725"
[1] "741"
[1] "744"
[1] "743"
[1] "654"
[1] "653"
[1] "656"
[1] "753"
[1] "709"
[1] "751"
[1] "707"
[1] "749"
[1] "710"
[1] "686"
[1] "758"
[1] "685"
[1] "665"
[1] "761"
[1] "692"
[1] "662"
[1] "760"
[1] "689"
[1] "664"
[1] "698"
[1] "701"
[1] "702"

set up individual CAM data

Create CAM files, draw CAMs and compute network indicators

### create CAM single files (nodes, connectors, merged)
CAMfiles_individual <- create_CAMfiles(datCAM = raw_CAM_individual, reDeleted = TRUE)
Nodes and connectors, which were deleted by participants were removed. 
 # deleted nodes:  82 
 # deleted connectors:  22
## remove empty concepts:
CAMfiles_individual[[1]]$text[nchar(CAMfiles_individual[[1]]$text) < 2]
character(0)
# tmp_ids <- CAMfiles_individual[[1]]$id[nchar(CAMfiles_individual[[1]]$text) < 2]
# table(CAMfiles_individual[[1]]$isActive[CAMfiles_individual[[1]]$id %in% tmp_ids])
# CAMfiles_individual[[1]] <- CAMfiles_individual[[1]][!CAMfiles_individual[[1]]$id %in% tmp_ids,]


### draw CAMs
CAMdrawn_individual <- draw_CAM(dat_merged = CAMfiles_individual[[3]],
                     dat_nodes = CAMfiles_individual[[1]],ids_CAMs = "all",
                     plot_CAM = FALSE,
                     useCoordinates = TRUE,
                     relvertexsize = 3,
                     reledgesize = 1)
processing 34 CAMs... 
Warning: `graph.data.frame()` was deprecated in igraph 2.0.0.
ℹ Please use `graph_from_data_frame()` instead.
[1] "== participantCAM in drawnCAM"
## check for implausible (very short) concept labels
for(i in 1:length(CAMdrawn_individual)){
  if(any(nchar(V(CAMdrawn_individual[[i]])$label) < 3)){
    print(V(CAMdrawn_individual[[i]])$label)
  }
}

### network indicators
tmp_microIndicator <- c("Wirtschaftswachstum", "Ökologische Nachhaltigkeit") # pre-defined concepts
networkIndicators_individual <- compute_indicatorsCAM(drawn_CAM = CAMdrawn_individual, 
                                           micro_degree = tmp_microIndicator, 
                                           micro_valence = tmp_microIndicator, 
                                           micro_centr_clo = tmp_microIndicator, 
                                           micro_transitivity = tmp_microIndicator, 
                                           largestClique = FALSE)
Warning: `graph.density()` was deprecated in igraph 2.0.0.
ℹ Please use `edge_density()` instead.
Warning: The `types1` argument of `assortativity()` is deprecated as of igraph 1.6.0.
ℹ Please use the `values` argument instead.
Warning: `assortativity.degree()` was deprecated in igraph 2.0.0.
ℹ Please use `assortativity_degree()` instead.
## check for CAMs in which no valence was changed
tmp_ids <- networkIndicators_individual$CAM_ID[is.na(networkIndicators_individual$assortativity_valence_macro)]
for(i in tmp_ids){
    plot(CAMdrawn_individual[[i]], edge.arrow.size = .7,
       layout=layout_nicely, vertex.frame.color="black", asp = .5, margin = -0.1,
       vertex.size = 10, vertex.label.cex = .9)
}
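
The micro indicators are computed for the two pre-defined concepts. Exactly which columns compute_indicatorsCAM() creates for them depends on the version of the function, so the following is only a sketch for locating those columns by name:

## optional sketch: list which indicator columns refer to the two pre-defined concepts
## (the exact column names depend on the version of compute_indicatorsCAM)
grep(pattern = "Wirtschaftswachstum|Nachhaltigkeit",
     x = colnames(networkIndicators_individual), value = TRUE)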


### wordlist
CAMwordlist_individual <- create_wordlist(
  dat_nodes =  CAMfiles_individual[[1]],
  dat_merged =  CAMfiles_individual[[3]],
  useSummarized = TRUE,
  order = "frequency",
  splitByValence = FALSE,
  comments = TRUE,
  raterSubsetWords = NULL,
  rater = FALSE
)
[1] "create_wordlist - use raw words"
[1] 0
[1] 456
[1] "temporarily suffixes are added, because not all words have been summarized"
processing 34 CAMs... 
[1] "== participantCAM in drawnCAM"
DT::datatable(CAMwordlist_individual, options = list(pageLength = 5)) 

save CAMs as .json files, and as .png (igraph)

if(save_CAMs_as_pictures){
setwd("outputs")

setwd("savedCAMs_individual")
setwd("png")
### remove all files if there are any
if(length(list.files()) >= 1){
  file.remove(list.files())
  cat('\n!
      all former .png files have been deleted')
}

### if no participant ID was provided, replace it with the randomly generated CAM ID

if(all(CAMfiles_individual[[3]]$participantCAM.x == "noID")){
  CAMfiles_individual[[3]]$participantCAM.x <- CAMfiles_individual[[3]]$CAM.x
}

### save as .json files, and as .png (igraph)
ids_CAMs <- unique(CAMfiles_individual[[3]]$participantCAM.x); length(ids_CAMs)


for(i in 1:length(ids_CAMs)){
  save_graphic(filename = paste0("individual_", ids_CAMs[i])) #  paste0(ids_CAMs[i]))
  CAM_igraph <- CAMdrawn_individual[[c(1:length(CAMdrawn_individual))[
    names(CAMdrawn_individual) == paste0(unique(CAMfiles_individual[[3]]$participantCAM.x)[i])]]]
  plot(CAM_igraph, edge.arrow.size = .7,
       layout=layout_nicely, vertex.frame.color="black", asp = .5, margin = -0.1,
       vertex.size = 10, vertex.label.cex = .9)
  dev.off()
}

setwd("../json")
### remove all files if there are any
if(length(list.files()) >= 1){
  file.remove(list.files())
  cat('\n!
      all former .json files have been deleted')
}
for(i in 1:length(raw_CAM_individual)){
  if(!is_empty(raw_CAM_individual[[i]]$nodes)){
    if(nrow(raw_CAM_individual[[i]]$nodes) > 5){
      write(toJSON(raw_CAM_individual[[i]], encoding = "UTF-8"),
            paste0(raw_CAM_individual[[i]]$idCAM, ".json"))
    }
  }
}
}

set up group CAM data

Create CAM files, draw CAMs and compute network indicators

### create CAM single files (nodes, connectors, merged)
CAMfiles_group <- create_CAMfiles(datCAM = raw_CAM_group, reDeleted = TRUE)
Nodes and connectors, which were deleted by participants were removed. 
 # deleted nodes:  21 
 # deleted connectors:  10
## remove empty concepts:
CAMfiles_group[[1]]$text[nchar(CAMfiles_group[[1]]$text) < 2]
character(0)
# tmp_ids <- CAMfiles_group[[1]]$id[nchar(CAMfiles_group[[1]]$text) < 2]
# table(CAMfiles_group[[1]]$isActive[CAMfiles_group[[1]]$id %in% tmp_ids])
# CAMfiles_group[[1]] <- CAMfiles_group[[1]][!CAMfiles_group[[1]]$id %in% tmp_ids,]


### draw CAMs
CAMdrawn_group <- draw_CAM(dat_merged = CAMfiles_group[[3]],
                     dat_nodes = CAMfiles_group[[1]],ids_CAMs = "all",
                     plot_CAM = FALSE,
                     useCoordinates = TRUE,
                     relvertexsize = 3,
                     reledgesize = 1)
processing 11 CAMs... 
[1] "== participantCAM in drawnCAM"
## check for implausible (very short) concept labels
for(i in 1:length(CAMdrawn_group)){
  if(any(nchar(V(CAMdrawn_group[[i]])$label) < 3)){
    print(V(CAMdrawn_group[[i]])$label)
  }
}

### network indicators
tmp_microIndicator <- c("Wirtschaftswachstum", "Ökologische Nachhaltigkeit") # pre-defined concepts
networkIndicators_group <- compute_indicatorsCAM(drawn_CAM = CAMdrawn_group, 
                                           micro_degree = tmp_microIndicator, 
                                           micro_valence = tmp_microIndicator, 
                                           micro_centr_clo = tmp_microIndicator, 
                                           micro_transitivity = tmp_microIndicator, 
                                           largestClique = FALSE)


## check for CAMs in which no valence was changed
tmp_ids <- networkIndicators_group$CAM_ID[is.na(networkIndicators_group$assortativity_valence_macro)]
for(i in tmp_ids){
    plot(CAMdrawn_group[[i]], edge.arrow.size = .7,
       layout=layout_nicely, vertex.frame.color="black", asp = .5, margin = -0.1,
       vertex.size = 10, vertex.label.cex = .9)
}

### wordlist
CAMwordlist_group <- create_wordlist(
  dat_nodes =  CAMfiles_group[[1]],
  dat_merged =  CAMfiles_group[[3]],
  useSummarized = TRUE,
  order = "frequency",
  splitByValence = FALSE,
  comments = TRUE,
  raterSubsetWords = NULL,
  rater = FALSE
)
[1] "create_wordlist - use raw words"
[1] 0
[1] 180
[1] "temporarily suffixes are added, because not all words have been summarized"
processing 11 CAMs... 
[1] "== participantCAM in drawnCAM"
DT::datatable(CAMwordlist_group, options = list(pageLength = 5)) 

save CAMs as .json files, and as .png (igraph)

if(save_CAMs_as_pictures){
setwd("outputs")

setwd("savedCAMs_group")
setwd("png")
### remove all files if there are any
if(length(list.files()) >= 1){
  file.remove(list.files())
  cat('\n!
      all former .png files have been deleted')
}

### if no participant ID was provided, replace it with the randomly generated CAM ID

if(all(CAMfiles_group[[3]]$participantCAM.x == "noID")){
  CAMfiles_group[[3]]$participantCAM.x <- CAMfiles_group[[3]]$CAM.x
}

### save as .json files, and as .png (igraph)
ids_CAMs <- unique(CAMfiles_group[[3]]$participantCAM.x); length(ids_CAMs)


for(i in 1:length(ids_CAMs)){
  save_graphic(filename = paste0("group_", ids_CAMs[i])) #  paste0(ids_CAMs[i]))
  CAM_igraph <- CAMdrawn_group[[c(1:length(CAMdrawn_group))[
    names(CAMdrawn_group) == paste0(unique(CAMfiles_group[[3]]$participantCAM.x)[i])]]]
  plot(CAM_igraph, edge.arrow.size = .7,
       layout=layout_nicely, vertex.frame.color="black", asp = .5, margin = -0.1,
       vertex.size = 10, vertex.label.cex = .9)
  dev.off()
}

setwd("../json")
### remove all files if there are any
if(length(list.files()) >= 1){
  file.remove(list.files())
  cat('\n!
      all former .json files have been deleted')
}
for(i in 1:length(raw_CAM_group)){
  if(!is_empty(raw_CAM_group[[i]]$nodes)){
    if(nrow(raw_CAM_group[[i]]$nodes) > 5){
      write(toJSON(raw_CAM_group[[i]], encoding = "UTF-8"),
            paste0(raw_CAM_group[[i]]$idCAM, ".json"))
    }
  }
}
}

merge all CAM files

### save CAMs
setwd("outputs/data CAMs")

writeLines("", "CAMdata_IndGroup.txt") # create file
text_connection <- file("CAMdata_IndGroup.txt", "a") # open connection to append


vec_ids <- c(); h = 1

## for individual CAMs
for(i in 1:length(raw_CAM_individual)){
    if(!is.null(raw_CAM_individual[[i]]$creator) && raw_CAM_individual[[i]]$creator %in% unique(dat_SosciSurvey$CASE)){
    vec_ids[h] <- raw_CAM_individual[[i]]$creator
    h = h + 1
    
    raw_CAM_individual[[i]]$creator <- paste0("individual_", raw_CAM_individual[[i]]$creator) ## overwrite ID
    writeLines(jsonlite::toJSON(x =  raw_CAM_individual[[i]]), text_connection)
  }
}


## for group CAMs
for(i in 1:length(raw_CAM_group)){
    if(!is.null(raw_CAM_group[[i]]$creator) && raw_CAM_group[[i]]$creator %in% unique(dat_SosciSurvey$CASE)){
    vec_ids[h] <- raw_CAM_group[[i]]$creator
    h = h + 1
    
        raw_CAM_group[[i]]$creator <- paste0("group_", raw_CAM_group[[i]]$creator) ## overwrite ID

    writeLines(jsonlite::toJSON(x =  raw_CAM_group[[i]]), text_connection)
  }
}



close(text_connection)



if(all(vec_ids %in% dat_SosciSurvey$CASE)){
  print("all CAMs saved")
}
[1] "all CAMs saved"

set up SoSci Survey data

prepare data

row.names(dat_SosciSurvey) = dat_SosciSurvey$CASE


attr(dat_SosciSurvey, "project") = "test399712"
attr(dat_SosciSurvey, "description") = "Master_CAM_01"
attr(dat_SosciSurvey, "date") = "2024-04-22 16:00:16"
attr(dat_SosciSurvey, "server") = "https://www.soscisurvey.de"

# Variable and value labels
dat_SosciSurvey$AB01 = factor(dat_SosciSurvey$AB01, levels=c("1","2","3","4","5","6","7","8","9","10","11","12","-9"), labels=c("Kein Schulabschluss","Hauptschulabschluss","Realschule (Mittlere Reife)","Gymnasium (Abitur)","Abgeschlossene Ausbildung","Fachhochschulabschluss","Meister*in","Hochschule (Bachelor)","Hochschule (Master)","Hochschule (Promotion)","Andere","Das möchte ich nicht angeben","[NA] nicht beantwortet"), ordered=FALSE)
dat_SosciSurvey$BD02 = factor(dat_SosciSurvey$BD02, levels=c("1","2","3","4","5","-9"), labels=c("Weiblich","Männlich","Divers","<4>","Das möchte ich nicht angeben","[NA] nicht beantwortet"), ordered=FALSE)
dat_SosciSurvey$BD04 = factor(dat_SosciSurvey$BD04, levels=c("1","2","3","-9"), labels=c("Ja","Nein, sondern","Das möchte ich nicht angeben","[NA] nicht beantwortet"), ordered=FALSE)
dat_SosciSurvey$BD06 = factor(dat_SosciSurvey$BD06, levels=c("1","2","3","-9"), labels=c("Ja, aktuell im Bachelor-Studium.","Ja, aktuell im Master-Studium.","Nein.","[NA] nicht beantwortet"), ordered=FALSE)
dat_SosciSurvey$BD07 = factor(dat_SosciSurvey$BD07, levels=c("1","2","-9"), labels=c("Ja","Nein","[NA] nicht beantwortet"), ordered=FALSE)
dat_SosciSurvey$GE02 = factor(dat_SosciSurvey$GE02, levels=c("1","2","3","4","-9"), labels=c("weiteres Wirtschaftswachstum verfolgen trotz der damit verbundenen Umweltfolgen.","weiteres Wirtschaftswachstum verfolgen. Es gibt viele Wege, um Wirtschaftswachstum mit umweltbezogener Nachhaltigkeit kompatibel zu machen.","Wirtschaftswachstum als politisches Ziel ignorieren, d.h. komplett neutral bezüglich Wachstum sein. Dies wird die Bandbreite politischer Maßnahmen erweitern, um Ziele des Wohlstands und der umweltbezogenen Nachhaltigkeit zu verbinden.","aufhören, Wirtschaftswachstum zu verfolgen. Produktion und Konsum müssen auf gerechte Weise reduziert werden, um umweltbezogene Nachhaltigkeit zu erreichen.","[NA] nicht beantwortet"), ordered=FALSE)
dat_SosciSurvey$IC01 = factor(dat_SosciSurvey$IC01, levels=c("1","2","-9"), labels=c("Hiermit versichere ich, dass ich die oben beschriebenen Teilnahmeinformationen verstanden habe und mit den genannten Teilnahmebedingungen einverstanden bin.","Ich bin mit den genannten Teilnahmebedingungen nicht einverstanden. (Führt zum Abbruch der Studie)","[NA] nicht beantwortet"), ordered=FALSE)
dat_SosciSurvey$PO01 = factor(dat_SosciSurvey$PO01, levels=c("1","2","3","4","5","6","7","8","-9"), labels=c("AfD","CDU/CSU","Die Linke","FDP","Bündnis 90/Die Grünen","SPD","Andere","Ich würde nicht wählen","[NA] nicht beantwortet"), ordered=FALSE)
dat_SosciSurvey$TE02 = factor(dat_SosciSurvey$TE02, levels=c("1","2","-9"), labels=c("Nein","Ja, und zwar","[NA] nicht beantwortet"), ordered=FALSE)
attr(dat_SosciSurvey$GE01_01,"1") = "1 - überhaupt nicht informiert"
attr(dat_SosciSurvey$GE01_01,"2") = "[-2-]"
attr(dat_SosciSurvey$GE01_01,"3") = "[-3-]"
attr(dat_SosciSurvey$GE01_01,"4") = "[-4-]"
attr(dat_SosciSurvey$GE01_01,"5") = "[-5-]"
attr(dat_SosciSurvey$GE01_01,"6") = "[-6-]"
attr(dat_SosciSurvey$GE01_01,"7") = "7 - sehr gut informiert"
attr(dat_SosciSurvey$GE01_02,"1") = "1 - überhaupt nicht informiert"
attr(dat_SosciSurvey$GE01_02,"2") = "[-2-]"
attr(dat_SosciSurvey$GE01_02,"3") = "[-3-]"
attr(dat_SosciSurvey$GE01_02,"4") = "[-4-]"
attr(dat_SosciSurvey$GE01_02,"5") = "[-5-]"
attr(dat_SosciSurvey$GE01_02,"6") = "[-6-]"
attr(dat_SosciSurvey$GE01_02,"7") = "7 - sehr gut informiert"
attr(dat_SosciSurvey$GP_01,"1") = "Stimme voll und ganz zu"
attr(dat_SosciSurvey$GP_01,"2") = "Stimme zu"
attr(dat_SosciSurvey$GP_01,"3") = "Stimme eher zu"
attr(dat_SosciSurvey$GP_01,"4") = "Stimme weder zu noch nicht zu"
attr(dat_SosciSurvey$GP_01,"5") = "Stimme eher nicht zu"
attr(dat_SosciSurvey$GP_01,"6") = "Stimme nicht zu"
attr(dat_SosciSurvey$GP_01,"7") = "Stimme überhaput nicht zu"
attr(dat_SosciSurvey$GP_02,"1") = "Stimme voll und ganz zu"
attr(dat_SosciSurvey$GP_02,"2") = "Stimme zu"
attr(dat_SosciSurvey$GP_02,"3") = "Stimme eher zu"
attr(dat_SosciSurvey$GP_02,"4") = "Stimme weder zu noch nicht zu"
attr(dat_SosciSurvey$GP_02,"5") = "Stimme eher nicht zu"
attr(dat_SosciSurvey$GP_02,"6") = "Stimme nicht zu"
attr(dat_SosciSurvey$GP_02,"7") = "Stimme überhaput nicht zu"
attr(dat_SosciSurvey$GP_03,"1") = "Stimme voll und ganz zu"
attr(dat_SosciSurvey$GP_03,"2") = "Stimme zu"
attr(dat_SosciSurvey$GP_03,"3") = "Stimme eher zu"
attr(dat_SosciSurvey$GP_03,"4") = "Stimme weder zu noch nicht zu"
attr(dat_SosciSurvey$GP_03,"5") = "Stimme eher nicht zu"
attr(dat_SosciSurvey$GP_03,"6") = "Stimme nicht zu"
attr(dat_SosciSurvey$GP_03,"7") = "Stimme überhaput nicht zu"
attr(dat_SosciSurvey$GP_04,"1") = "Stimme voll und ganz zu"
attr(dat_SosciSurvey$GP_04,"2") = "Stimme zu"
attr(dat_SosciSurvey$GP_04,"3") = "Stimme eher zu"
attr(dat_SosciSurvey$GP_04,"4") = "Stimme weder zu noch nicht zu"
attr(dat_SosciSurvey$GP_04,"5") = "Stimme eher nicht zu"
attr(dat_SosciSurvey$GP_04,"6") = "Stimme nicht zu"
attr(dat_SosciSurvey$GP_04,"7") = "Stimme überhaput nicht zu"
attr(dat_SosciSurvey$GP_05,"1") = "Stimme überhaput nicht zu"
attr(dat_SosciSurvey$GP_05,"2") = "Stimme nicht zu"
attr(dat_SosciSurvey$GP_05,"3") = "Stimme eher nicht zu"
attr(dat_SosciSurvey$GP_05,"4") = "Stimme weder zu noch nicht zu"
attr(dat_SosciSurvey$GP_05,"5") = "Stimme eher zu"
attr(dat_SosciSurvey$GP_05,"6") = "Stimme zu"
attr(dat_SosciSurvey$GP_05,"7") = "Stimme voll und ganz zu"
attr(dat_SosciSurvey$GP_06,"1") = "Stimme überhaput nicht zu"
attr(dat_SosciSurvey$GP_06,"2") = "Stimme nicht zu"
attr(dat_SosciSurvey$GP_06,"3") = "Stimme eher nicht zu"
attr(dat_SosciSurvey$GP_06,"4") = "Stimme weder zu noch nicht zu"
attr(dat_SosciSurvey$GP_06,"5") = "Stimme eher zu"
attr(dat_SosciSurvey$GP_06,"6") = "Stimme zu"
attr(dat_SosciSurvey$GP_06,"7") = "Stimme voll und ganz zu"
attr(dat_SosciSurvey$GP_07,"1") = "Stimme überhaput nicht zu"
attr(dat_SosciSurvey$GP_07,"2") = "Stimme nicht zu"
attr(dat_SosciSurvey$GP_07,"3") = "Stimme eher nicht zu"
attr(dat_SosciSurvey$GP_07,"4") = "Stimme weder zu noch nicht zu"
attr(dat_SosciSurvey$GP_07,"5") = "Stimme eher zu"
attr(dat_SosciSurvey$GP_07,"6") = "Stimme zu"
attr(dat_SosciSurvey$GP_07,"7") = "Stimme voll und ganz zu"
attr(dat_SosciSurvey$GP_08,"1") = "Stimme überhaput nicht zu"
attr(dat_SosciSurvey$GP_08,"2") = "Stimme nicht zu"
attr(dat_SosciSurvey$GP_08,"3") = "Stimme eher nicht zu"
attr(dat_SosciSurvey$GP_08,"4") = "Stimme weder zu noch nicht zu"
attr(dat_SosciSurvey$GP_08,"5") = "Stimme eher zu"
attr(dat_SosciSurvey$GP_08,"6") = "Stimme zu"
attr(dat_SosciSurvey$GP_08,"7") = "Stimme voll und ganz zu"
attr(dat_SosciSurvey$GP_09,"1") = "Stimme überhaput nicht zu"
attr(dat_SosciSurvey$GP_09,"2") = "Stimme nicht zu"
attr(dat_SosciSurvey$GP_09,"3") = "Stimme eher nicht zu"
attr(dat_SosciSurvey$GP_09,"4") = "Stimme weder zu noch nicht zu"
attr(dat_SosciSurvey$GP_09,"5") = "Stimme eher zu"
attr(dat_SosciSurvey$GP_09,"6") = "Stimme zu"
attr(dat_SosciSurvey$GP_09,"7") = "Stimme voll und ganz zu"
attr(dat_SosciSurvey$GP_10,"1") = "Stimme voll und ganz zu"
attr(dat_SosciSurvey$GP_10,"2") = "Stimme zu"
attr(dat_SosciSurvey$GP_10,"3") = "Stimme eher zu"
attr(dat_SosciSurvey$GP_10,"4") = "Stimme weder zu noch nicht zu"
attr(dat_SosciSurvey$GP_10,"5") = "Stimme eher nicht zu"
attr(dat_SosciSurvey$GP_10,"6") = "Stimme nicht zu"
attr(dat_SosciSurvey$GP_10,"7") = "Stimme überhaput nicht zu"
attr(dat_SosciSurvey$GP_11,"1") = "Stimme voll und ganz zu"
attr(dat_SosciSurvey$GP_11,"2") = "Stimme zu"
attr(dat_SosciSurvey$GP_11,"3") = "Stimme eher zu"
attr(dat_SosciSurvey$GP_11,"4") = "Stimme weder zu noch nicht zu"
attr(dat_SosciSurvey$GP_11,"5") = "Stimme eher nicht zu"
attr(dat_SosciSurvey$GP_11,"6") = "Stimme nicht zu"
attr(dat_SosciSurvey$GP_11,"7") = "Stimme überhaput nicht zu"
attr(dat_SosciSurvey$GP_12,"1") = "Stimme voll und ganz zu"
attr(dat_SosciSurvey$GP_12,"2") = "Stimme zu"
attr(dat_SosciSurvey$GP_12,"3") = "Stimme eher zu"
attr(dat_SosciSurvey$GP_12,"4") = "Stimme weder zu noch nicht zu"
attr(dat_SosciSurvey$GP_12,"5") = "Stimme eher nicht zu"
attr(dat_SosciSurvey$GP_12,"6") = "Stimme nicht zu"
attr(dat_SosciSurvey$GP_12,"7") = "Stimme überhaput nicht zu"
attr(dat_SosciSurvey$GP_13,"1") = "Stimme überhaput nicht zu"
attr(dat_SosciSurvey$GP_13,"2") = "Stimme nicht zu"
attr(dat_SosciSurvey$GP_13,"3") = "Stimme eher nicht zu"
attr(dat_SosciSurvey$GP_13,"4") = "Stimme weder zu noch nicht zu"
attr(dat_SosciSurvey$GP_13,"5") = "Stimme eher zu"
attr(dat_SosciSurvey$GP_13,"6") = "Stimme zu"
attr(dat_SosciSurvey$GP_13,"7") = "Stimme voll und ganz zu"
attr(dat_SosciSurvey$GP_14,"1") = "Stimme überhaput nicht zu"
attr(dat_SosciSurvey$GP_14,"2") = "Stimme nicht zu"
attr(dat_SosciSurvey$GP_14,"3") = "Stimme eher nicht zu"
attr(dat_SosciSurvey$GP_14,"4") = "Stimme weder zu noch nicht zu"
attr(dat_SosciSurvey$GP_14,"5") = "Stimme eher zu"
attr(dat_SosciSurvey$GP_14,"6") = "Stimme zu"
attr(dat_SosciSurvey$GP_14,"7") = "Stimme voll und ganz zu"
attr(dat_SosciSurvey$GP_15,"1") = "Stimme überhaput nicht zu"
attr(dat_SosciSurvey$GP_15,"2") = "Stimme nicht zu"
attr(dat_SosciSurvey$GP_15,"3") = "Stimme eher nicht zu"
attr(dat_SosciSurvey$GP_15,"4") = "Stimme weder zu noch nicht zu"
attr(dat_SosciSurvey$GP_15,"5") = "Stimme eher zu"
attr(dat_SosciSurvey$GP_15,"6") = "Stimme zu"
attr(dat_SosciSurvey$GP_15,"7") = "Stimme voll und ganz zu"
attr(dat_SosciSurvey$GP_16,"1") = "Stimme überhaput nicht zu"
attr(dat_SosciSurvey$GP_16,"2") = "Stimme nicht zu"
attr(dat_SosciSurvey$GP_16,"3") = "Stimme eher nicht zu"
attr(dat_SosciSurvey$GP_16,"4") = "Stimme weder zu noch nicht zu"
attr(dat_SosciSurvey$GP_16,"5") = "Stimme eher zu"
attr(dat_SosciSurvey$GP_16,"6") = "Stimme zu"
attr(dat_SosciSurvey$GP_16,"7") = "Stimme voll und ganz zu"
attr(dat_SosciSurvey$PC01_05,"1") = "gar nicht"
attr(dat_SosciSurvey$PC01_05,"2") = "wenig"
attr(dat_SosciSurvey$PC01_05,"3") = "mittelmäßig"
attr(dat_SosciSurvey$PC01_05,"4") = "überwiegend"
attr(dat_SosciSurvey$PC01_05,"5") = "völlig"
attr(dat_SosciSurvey$PC01_08,"1") = "gar nicht"
attr(dat_SosciSurvey$PC01_08,"2") = "wenig"
attr(dat_SosciSurvey$PC01_08,"3") = "mittelmäßig"
attr(dat_SosciSurvey$PC01_08,"4") = "überwiegend"
attr(dat_SosciSurvey$PC01_08,"5") = "völlig"
attr(dat_SosciSurvey$PC01_09,"1") = "gar nicht"
attr(dat_SosciSurvey$PC01_09,"2") = "wenig"
attr(dat_SosciSurvey$PC01_09,"3") = "mittelmäßig"
attr(dat_SosciSurvey$PC01_09,"4") = "überwiegend"
attr(dat_SosciSurvey$PC01_09,"5") = "völlig"
attr(dat_SosciSurvey$PC01_12,"1") = "gar nicht"
attr(dat_SosciSurvey$PC01_12,"2") = "wenig"
attr(dat_SosciSurvey$PC01_12,"3") = "mittelmäßig"
attr(dat_SosciSurvey$PC01_12,"4") = "überwiegend"
attr(dat_SosciSurvey$PC01_12,"5") = "völlig"
attr(dat_SosciSurvey$PC01_06,"1") = "gar nicht"
attr(dat_SosciSurvey$PC01_06,"2") = "wenig"
attr(dat_SosciSurvey$PC01_06,"3") = "mittelmäßig"
attr(dat_SosciSurvey$PC01_06,"4") = "überwiegend"
attr(dat_SosciSurvey$PC01_06,"5") = "völlig"
attr(dat_SosciSurvey$PC01_11,"1") = "gar nicht"
attr(dat_SosciSurvey$PC01_11,"2") = "wenig"
attr(dat_SosciSurvey$PC01_11,"3") = "mittelmäßig"
attr(dat_SosciSurvey$PC01_11,"4") = "überwiegend"
attr(dat_SosciSurvey$PC01_11,"5") = "völlig"
attr(dat_SosciSurvey$PC01_04,"1") = "gar nicht"
attr(dat_SosciSurvey$PC01_04,"2") = "wenig"
attr(dat_SosciSurvey$PC01_04,"3") = "mittelmäßig"
attr(dat_SosciSurvey$PC01_04,"4") = "überwiegend"
attr(dat_SosciSurvey$PC01_04,"5") = "völlig"
attr(dat_SosciSurvey$PC01_03,"1") = "gar nicht"
attr(dat_SosciSurvey$PC01_03,"2") = "wenig"
attr(dat_SosciSurvey$PC01_03,"3") = "mittelmäßig"
attr(dat_SosciSurvey$PC01_03,"4") = "überwiegend"
attr(dat_SosciSurvey$PC01_03,"5") = "völlig"
attr(dat_SosciSurvey$PC06_01,"1") = "völlig unrepräsentativ"
attr(dat_SosciSurvey$PC06_01,"2") = "etwas unrepräsentativ"
attr(dat_SosciSurvey$PC06_01,"3") = "ein wenig unrepräsentativ"
attr(dat_SosciSurvey$PC06_01,"4") = "weder unrepräsentativ noch repräsentativ"
attr(dat_SosciSurvey$PC06_01,"5") = "ein wenig repräsentativ"
attr(dat_SosciSurvey$PC06_01,"6") = "etwas repräsentativ"
attr(dat_SosciSurvey$PC06_01,"7") = "völlig repräsentativ"
attr(dat_SosciSurvey$PC06_02,"1") = "völlig unrepräsentativ"
attr(dat_SosciSurvey$PC06_02,"2") = "etwas unrepräsentativ"
attr(dat_SosciSurvey$PC06_02,"3") = "ein wenig unrepräsentativ"
attr(dat_SosciSurvey$PC06_02,"4") = "weder unrepräsentativ noch repräsentativ"
attr(dat_SosciSurvey$PC06_02,"5") = "ein wenig repräsentativ"
attr(dat_SosciSurvey$PC06_02,"6") = "etwas repräsentativ"
attr(dat_SosciSurvey$PC06_02,"7") = "völlig repräsentativ"
attr(dat_SosciSurvey$PC02_01,"1") = "1 nie/keine"
attr(dat_SosciSurvey$PC02_01,"2") = "2 sehr selten/wenige"
attr(dat_SosciSurvey$PC02_01,"3") = "3 selten/wenige"
attr(dat_SosciSurvey$PC02_01,"4") = "4 manchmal/etwas"
attr(dat_SosciSurvey$PC02_01,"5") = "5 oft/viele"
attr(dat_SosciSurvey$PC02_01,"6") = "6 sehr oft/sehr viele"
attr(dat_SosciSurvey$PC02_02,"1") = "1 nie/keine"
attr(dat_SosciSurvey$PC02_02,"2") = "2 sehr selten/wenige"
attr(dat_SosciSurvey$PC02_02,"3") = "3 selten/wenige"
attr(dat_SosciSurvey$PC02_02,"4") = "4 manchmal/etwas"
attr(dat_SosciSurvey$PC02_02,"5") = "5 oft/viele"
attr(dat_SosciSurvey$PC02_02,"6") = "6 sehr oft/sehr viele"
attr(dat_SosciSurvey$PC02_03,"1") = "1 nie/keine"
attr(dat_SosciSurvey$PC02_03,"2") = "2 sehr selten/wenige"
attr(dat_SosciSurvey$PC02_03,"3") = "3 selten/wenige"
attr(dat_SosciSurvey$PC02_03,"4") = "4 manchmal/etwas"
attr(dat_SosciSurvey$PC02_03,"5") = "5 oft/viele"
attr(dat_SosciSurvey$PC02_03,"6") = "6 sehr oft/sehr viele"
attr(dat_SosciSurvey$PC02_08,"1") = "1 nie/keine"
attr(dat_SosciSurvey$PC02_08,"2") = "2 sehr selten/wenige"
attr(dat_SosciSurvey$PC02_08,"3") = "3 selten/wenige"
attr(dat_SosciSurvey$PC02_08,"4") = "4 manchmal/etwas"
attr(dat_SosciSurvey$PC02_08,"5") = "5 oft/viele"
attr(dat_SosciSurvey$PC02_08,"6") = "6 sehr oft/sehr viele"
attr(dat_SosciSurvey$PC02_04,"1") = "1 nie/keine"
attr(dat_SosciSurvey$PC02_04,"2") = "2 sehr selten/wenige"
attr(dat_SosciSurvey$PC02_04,"3") = "3 selten/wenige"
attr(dat_SosciSurvey$PC02_04,"4") = "4 manchmal/etwas"
attr(dat_SosciSurvey$PC02_04,"5") = "5 oft/viele"
attr(dat_SosciSurvey$PC02_04,"6") = "6 sehr oft/sehr viele"
attr(dat_SosciSurvey$PC02_05,"1") = "1 nie/keine"
attr(dat_SosciSurvey$PC02_05,"2") = "2 sehr selten/wenige"
attr(dat_SosciSurvey$PC02_05,"3") = "3 selten/wenige"
attr(dat_SosciSurvey$PC02_05,"4") = "4 manchmal/etwas"
attr(dat_SosciSurvey$PC02_05,"5") = "5 oft/viele"
attr(dat_SosciSurvey$PC02_05,"6") = "6 sehr oft/sehr viele"
attr(dat_SosciSurvey$PC02_06,"1") = "1 nie/keine"
attr(dat_SosciSurvey$PC02_06,"2") = "2 sehr selten/wenige"
attr(dat_SosciSurvey$PC02_06,"3") = "3 selten/wenige"
attr(dat_SosciSurvey$PC02_06,"4") = "4 manchmal/etwas"
attr(dat_SosciSurvey$PC02_06,"5") = "5 oft/viele"
attr(dat_SosciSurvey$PC02_06,"6") = "6 sehr oft/sehr viele"
attr(dat_SosciSurvey$PC02_07,"1") = "1 nie/keine"
attr(dat_SosciSurvey$PC02_07,"2") = "2 sehr selten/wenige"
attr(dat_SosciSurvey$PC02_07,"3") = "3 selten/wenige"
attr(dat_SosciSurvey$PC02_07,"4") = "4 manchmal/etwas"
attr(dat_SosciSurvey$PC02_07,"5") = "5 oft/viele"
attr(dat_SosciSurvey$PC02_07,"6") = "6 sehr oft/sehr viele"
attr(dat_SosciSurvey$PC03_01,"1") = "trifft gar nicht zu"
attr(dat_SosciSurvey$PC03_01,"2") = "trifft wenig zu"
attr(dat_SosciSurvey$PC03_01,"3") = "trifft teils-teils zu"
attr(dat_SosciSurvey$PC03_01,"4") = "trifft ziemlich zu"
attr(dat_SosciSurvey$PC03_01,"5") = "trifft völlig zu"
attr(dat_SosciSurvey$PC03_02,"1") = "trifft gar nicht zu"
attr(dat_SosciSurvey$PC03_02,"2") = "trifft wenig zu"
attr(dat_SosciSurvey$PC03_02,"3") = "trifft teils-teils zu"
attr(dat_SosciSurvey$PC03_02,"4") = "trifft ziemlich zu"
attr(dat_SosciSurvey$PC03_02,"5") = "trifft völlig zu"
attr(dat_SosciSurvey$PC03_04,"1") = "trifft gar nicht zu"
attr(dat_SosciSurvey$PC03_04,"2") = "trifft wenig zu"
attr(dat_SosciSurvey$PC03_04,"3") = "trifft teils-teils zu"
attr(dat_SosciSurvey$PC03_04,"4") = "trifft ziemlich zu"
attr(dat_SosciSurvey$PC03_04,"5") = "trifft völlig zu"
attr(dat_SosciSurvey$PC03_06,"1") = "trifft gar nicht zu"
attr(dat_SosciSurvey$PC03_06,"2") = "trifft wenig zu"
attr(dat_SosciSurvey$PC03_06,"3") = "trifft teils-teils zu"
attr(dat_SosciSurvey$PC03_06,"4") = "trifft ziemlich zu"
attr(dat_SosciSurvey$PC03_06,"5") = "trifft völlig zu"
attr(dat_SosciSurvey$PC03_07,"1") = "trifft gar nicht zu"
attr(dat_SosciSurvey$PC03_07,"2") = "trifft wenig zu"
attr(dat_SosciSurvey$PC03_07,"3") = "trifft teils-teils zu"
attr(dat_SosciSurvey$PC03_07,"4") = "trifft ziemlich zu"
attr(dat_SosciSurvey$PC03_07,"5") = "trifft völlig zu"
attr(dat_SosciSurvey$PC03_08,"1") = "trifft gar nicht zu"
attr(dat_SosciSurvey$PC03_08,"2") = "trifft wenig zu"
attr(dat_SosciSurvey$PC03_08,"3") = "trifft teils-teils zu"
attr(dat_SosciSurvey$PC03_08,"4") = "trifft ziemlich zu"
attr(dat_SosciSurvey$PC03_08,"5") = "trifft völlig zu"
attr(dat_SosciSurvey$PC03_09,"1") = "trifft gar nicht zu"
attr(dat_SosciSurvey$PC03_09,"2") = "trifft wenig zu"
attr(dat_SosciSurvey$PC03_09,"3") = "trifft teils-teils zu"
attr(dat_SosciSurvey$PC03_09,"4") = "trifft ziemlich zu"
attr(dat_SosciSurvey$PC03_09,"5") = "trifft völlig zu"
attr(dat_SosciSurvey$PC03_10,"1") = "trifft gar nicht zu"
attr(dat_SosciSurvey$PC03_10,"2") = "trifft wenig zu"
attr(dat_SosciSurvey$PC03_10,"3") = "trifft teils-teils zu"
attr(dat_SosciSurvey$PC03_10,"4") = "trifft ziemlich zu"
attr(dat_SosciSurvey$PC03_10,"5") = "trifft völlig zu"
attr(dat_SosciSurvey$PC03_11,"1") = "trifft gar nicht zu"
attr(dat_SosciSurvey$PC03_11,"2") = "trifft wenig zu"
attr(dat_SosciSurvey$PC03_11,"3") = "trifft teils-teils zu"
attr(dat_SosciSurvey$PC03_11,"4") = "trifft ziemlich zu"
attr(dat_SosciSurvey$PC03_11,"5") = "trifft völlig zu"
attr(dat_SosciSurvey$FINISHED,"F") = "abgebrochen"
attr(dat_SosciSurvey$FINISHED,"T") = "ausgefüllt"
attr(dat_SosciSurvey$Q_VIEWER,"F") = "Teilnehmer"
attr(dat_SosciSurvey$Q_VIEWER,"T") = "Durchklicker"
comment(dat_SosciSurvey$SERIAL) = "Personenkennung oder Teilnahmecode (sofern verwendet)"
comment(dat_SosciSurvey$REF) = "Referenz (sofern im Link angegeben)"
comment(dat_SosciSurvey$QUESTNNR) = "Fragebogen, der im Interview verwendet wurde"
comment(dat_SosciSurvey$MODE) = "Interview-Modus"
comment(dat_SosciSurvey$STARTED) = "Zeitpunkt zu dem das Interview begonnen hat (Europe/Berlin)"
comment(dat_SosciSurvey$AB01) = "Höchster Bildungsabschluss"
comment(dat_SosciSurvey$BD01_01) = "Alter:  ... Jahre"
comment(dat_SosciSurvey$BD02) = "Geschlecht"
comment(dat_SosciSurvey$BD04) = "Muttersprache"
comment(dat_SosciSurvey$BD04_02) = "Muttersprache: Nein, sondern"
comment(dat_SosciSurvey$BD06) = "BD06"
comment(dat_SosciSurvey$BD07) = "LeaderGroup"
comment(dat_SosciSurvey$GE01_01) = "Wissen Econ Env: Wirtschaftsthemen"
comment(dat_SosciSurvey$GE01_02) = "Wissen Econ Env: Ökologische Nachhaltigkeit"
comment(dat_SosciSurvey$GE02) = "Positionierungsfrage"
comment(dat_SosciSurvey$GP_01) = "16C_erste 10: Anhaltendes Wirtschaftswachstum ist unerlässlich, um die Lebenszufriedenheit der Menschen zu verbessern. (umgepolt)"
comment(dat_SosciSurvey$GP_02) = "16C_erste 10: Wirtschaftswachstum ist notwendig, um öffentliche Gesundheits- und Rentensysteme zu finanzieren. (umgepolt)"
comment(dat_SosciSurvey$GP_03) = "16C_erste 10: Ohne Wirtschaftswachstum wird die Wirtschaft instabiler werden. (umgepolt)"
comment(dat_SosciSurvey$GP_04) = "16C_erste 10: Wirtschaftswachstum ist notwendig, um Umweltschutz zu finanzieren. (umgepolt)"
comment(dat_SosciSurvey$GP_05) = "16C_erste 10: Vollbeschäftigung kann ohne Wirtschaftswachstum erreicht werden."
comment(dat_SosciSurvey$GP_06) = "16C_erste 10: Ein ‘gutes Leben’ ohne Wirtschaftswachstum ist möglich."
comment(dat_SosciSurvey$GP_07) = "16C_erste 10: Energieeinsparungen durch technische Fortschritte werden teilweise durch weiteres Wirtschaftswachstum zunichte gemacht."
comment(dat_SosciSurvey$GP_08) = "16C_erste 10: Wirtschaftswachstum schadet immer der Umwelt."
comment(dat_SosciSurvey$GP_09) = "16C_erste 10: Angesichts begrenzter natürlicher Ressourcen müssen reiche Länder möglicherweise ihr Wirtschaftswachstum aufgeben, um sicherzustellen, dass alle armen Menschen auf der Welt einen angemessenen Lebensstandard erreichen können."
comment(dat_SosciSurvey$GP_10) = "16C_erste 10: Technologie kann alle Umweltprobleme lösen, die mit Wirtschaftswachstum zusammenhängen. (umgepolt)"
comment(dat_SosciSurvey$GP_11) = "16_C_restl 6: Zukünftiges Wirtschaftswachstum wird sich wieder erholen und so hoch wie in der Vergangenheit sein. (umgepolt)"
comment(dat_SosciSurvey$GP_12) = "16_C_restl 6: Wirtschaftswachstum erhöht Einkommen, was dazu führt, dass Menschen sich mehr um die Umwelt kümmern. (umgepolt)"
comment(dat_SosciSurvey$GP_13) = "16_C_restl 6: Die Politikerinnen und Politiker sind zu besorgt über das Wirtschaftswachstum."
comment(dat_SosciSurvey$GP_14) = "16_C_restl 6: Die Einkommensverteilung anzugleichen, sollte eine höhere Priorität haben als Wirtschaftswachstum."
comment(dat_SosciSurvey$GP_15) = "16_C_restl 6: Das BIP ist für gesellschaftlichen Wohlstand ein fehlerhaftes Maß."
comment(dat_SosciSurvey$GP_16) = "16_C_restl 6: Wirtschaftswachstum kann durch die Regierung gesteuert werden."
comment(dat_SosciSurvey$IC01) = "Informed Consent (deutsch)"
comment(dat_SosciSurvey$PC01_05) = "Übereinstimmung: Wie sehr stimmen die positiven (grünen) Konzepte der Mind-Map der Gruppenarbeit mit Ihrer eigenen Mind-Map überein?"
comment(dat_SosciSurvey$PC01_08) = "Übereinstimmung: Wie sehr stimmen die negativen (roten) Konzepte der Mind-Map der Gruppenarbeit mit Ihrer eigenen Mind-Map überein?"
comment(dat_SosciSurvey$PC01_09) = "Übereinstimmung: Wie sehr stimmen die neutralen (gelben) Konzepte der Mind-Map der Gruppenarbeit mit Ihrer eigenen Mind-Map überein?"
comment(dat_SosciSurvey$PC01_12) = "Übereinstimmung: Wie sehr stimmen die die ambivalenten (lilalen) Konzepte der Mind-Map der Gruppenarbeit mit Ihrer eigenen Mind-Map überein?"
comment(dat_SosciSurvey$PC01_06) = "Übereinstimmung: Wie sehr stimmt die Mind-Map der Gruppenarbeit in Ihren hemmenden Verbindungen zwischen den einzelnen Konzepten mit Ihrer eigenen Mind-Map überein?"
comment(dat_SosciSurvey$PC01_11) = "Übereinstimmung: Wie sehr stimmt die Mind-Map der Gruppenarbeit in Ihren verstärkenden Verbindungen zwischen den einzelnen Konzepten mit Ihrer eigenen Mind-Map überein?"
comment(dat_SosciSurvey$PC01_04) = "Übereinstimmung: Wie sehr beinhaltet die Mind-Map der Gruppenarbeit die Konzepte Ihrer eigenen Mind-Map?"
comment(dat_SosciSurvey$PC01_03) = "Übereinstimmung: Wie sehr stimmt die Mind-Map der Gruppenarbeit insgesamt mit Ihrer eigenen Mind-Map überein?"
comment(dat_SosciSurvey$PC06_01) = "Repräsentativität: Inwieweit spiegelt Ihre indivduelle Mind-Map Ihre Einstellungen und Gefühle zu Wirtschaftswachstum und ökologischer Nachhaltigkeit wieder?"
comment(dat_SosciSurvey$PC06_02) = "Repräsentativität: Inwieweit spiegelt die Mind-Map aus der Gruppenarbeit Ihre Einstellungen und Gefühle zu Wirtschaftswachstum und ökologischer Nachhaltigkeit wieder?"
comment(dat_SosciSurvey$PC02_01) = "ICS Deutsch Lehmann: Wie viele Reibereien gab es zwischen den Gruppenmitgliedern?"
comment(dat_SosciSurvey$PC02_02) = "ICS Deutsch Lehmann: Wie offensichtlich waren persönliche Konflikte in der Gruppe?"
comment(dat_SosciSurvey$PC02_03) = "ICS Deutsch Lehmann: Wie viele Spannungen gab es zwischen den Gruppenmitgliedern?"
comment(dat_SosciSurvey$PC02_08) = "ICS Deutsch Lehmann: Wie viele emotionale Konflikte gab es zwischen den Gruppenmitgliedern?"
comment(dat_SosciSurvey$PC02_04) = "ICS Deutsch Lehmann: Wie oft waren sich die Gruppenmitglieder uneinig, wie die Arbeit zu erledigen ist?"
comment(dat_SosciSurvey$PC02_05) = "ICS Deutsch Lehmann: Wie häufig gab es Ideenkonflikte in der Gruppe?"
comment(dat_SosciSurvey$PC02_06) = "ICS Deutsch Lehmann: Wie viele die Arbeit betreffende Konflikte gab es in der Gruppe?"
comment(dat_SosciSurvey$PC02_07) = "ICS Deutsch Lehmann: In welchem Ausmaß gab es Meinungsverschiedenheiten in der Gruppe?"
comment(dat_SosciSurvey$PC03_01) = "Eigene Items: Ich bin mit dem Ergebnis der Gruppenarbeit zufrieden."
comment(dat_SosciSurvey$PC03_02) = "Eigene Items: Ich würde das Ergebnis der Gruppenarbeit gerne verändern."
comment(dat_SosciSurvey$PC03_04) = "Eigene Items: Ich habe durch die Gruppenarbeit neues gelernt."
comment(dat_SosciSurvey$PC03_06) = "Eigene Items: Die Gruppenarbeit empfand ich als positiv."
comment(dat_SosciSurvey$PC03_07) = "Eigene Items: Die Gruppenarbeit empfand ich als negativ."
comment(dat_SosciSurvey$PC03_08) = "Eigene Items: Meine Einstellung zu Wirtschaftswachstum und ökologischer Nachhaltigkeit hat sich durch die Gruppenarbeit verändert."
comment(dat_SosciSurvey$PC03_09) = "Eigene Items: Die Gruppenarbeit hat mich in meiner Einstellung zu Wirtschaftswachstum und ölologischer Nachhaltigkeit bestärkt."
comment(dat_SosciSurvey$PC03_10) = "Eigene Items: Die Teilnehmenden haben sich bei der Gruppenarbeit gleichermaßen eingebracht."
comment(dat_SosciSurvey$PC03_11) = "Eigene Items: Ich kannte mindestens eine/n der Teilnehmenden meiner Gruppe zuvor."
comment(dat_SosciSurvey$PC0_GN) = "Gruppennummer: [01]"
comment(dat_SosciSurvey$PO01) = "Sonntagsfrage"
comment(dat_SosciSurvey$PO01_07) = "Sonntagsfrage: Andere"
comment(dat_SosciSurvey$SO01_01) = "Anmerkungen?: [01]"
comment(dat_SosciSurvey$TE01_RV1) = "POST/GET-Variable: participantID"
comment(dat_SosciSurvey$TE02) = "Technische Probleme?"
comment(dat_SosciSurvey$TE02_02) = "Technische Probleme?: Ja, und zwar"
comment(dat_SosciSurvey$TIME001) = "Verweildauer Seite 1"
comment(dat_SosciSurvey$TIME002) = "Verweildauer Seite 2"
comment(dat_SosciSurvey$TIME003) = "Verweildauer Seite 3"
comment(dat_SosciSurvey$TIME004) = "Verweildauer Seite 4"
comment(dat_SosciSurvey$TIME005) = "Verweildauer Seite 5"
comment(dat_SosciSurvey$TIME006) = "Verweildauer Seite 6"
comment(dat_SosciSurvey$TIME007) = "Verweildauer Seite 7"
comment(dat_SosciSurvey$TIME008) = "Verweildauer Seite 8"
comment(dat_SosciSurvey$TIME009) = "Verweildauer Seite 9"
comment(dat_SosciSurvey$TIME010) = "Verweildauer Seite 10"
comment(dat_SosciSurvey$TIME011) = "Verweildauer Seite 11"
comment(dat_SosciSurvey$TIME012) = "Verweildauer Seite 12"
comment(dat_SosciSurvey$TIME013) = "Verweildauer Seite 13"
comment(dat_SosciSurvey$TIME014) = "Verweildauer Seite 14"
comment(dat_SosciSurvey$TIME015) = "Verweildauer Seite 15"
comment(dat_SosciSurvey$TIME016) = "Verweildauer Seite 16"
comment(dat_SosciSurvey$TIME017) = "Verweildauer Seite 17"
comment(dat_SosciSurvey$TIME018) = "Verweildauer Seite 18"
comment(dat_SosciSurvey$TIME019) = "Verweildauer Seite 19"
comment(dat_SosciSurvey$TIME020) = "Verweildauer Seite 20"
comment(dat_SosciSurvey$TIME021) = "Verweildauer Seite 21"
comment(dat_SosciSurvey$TIME022) = "Verweildauer Seite 22"
comment(dat_SosciSurvey$TIME023) = "Verweildauer Seite 23"
comment(dat_SosciSurvey$TIME024) = "Verweildauer Seite 24"
comment(dat_SosciSurvey$TIME025) = "Verweildauer Seite 25"
comment(dat_SosciSurvey$TIME026) = "Verweildauer Seite 26"
comment(dat_SosciSurvey$TIME027) = "Verweildauer Seite 27"
comment(dat_SosciSurvey$TIME028) = "Verweildauer Seite 28"
comment(dat_SosciSurvey$TIME029) = "Verweildauer Seite 29"
comment(dat_SosciSurvey$TIME_SUM) = "Verweildauer gesamt (ohne Ausreißer)"
comment(dat_SosciSurvey$MAILSENT) = "Versandzeitpunkt der Einladungsmail (nur für nicht-anonyme Adressaten)"
comment(dat_SosciSurvey$LASTDATA) = "Zeitpunkt als der Datensatz das letzte mal geändert wurde"
comment(dat_SosciSurvey$FINISHED) = "Wurde die Befragung abgeschlossen (letzte Seite erreicht)?"
comment(dat_SosciSurvey$Q_VIEWER) = "Hat der Teilnehmer den Fragebogen nur angesehen, ohne die Pflichtfragen zu beantworten?"
comment(dat_SosciSurvey$LASTPAGE) = "Seite, die der Teilnehmer zuletzt bearbeitet hat"
comment(dat_SosciSurvey$MAXPAGE) = "Letzte Seite, die im Fragebogen bearbeitet wurde"
comment(dat_SosciSurvey$MISSING) = "Anteil fehlender Antworten in Prozent"
comment(dat_SosciSurvey$MISSREL) = "Anteil fehlender Antworten (gewichtet nach Relevanz)"
comment(dat_SosciSurvey$TIME_RSI) = "Ausfüll-Geschwindigkeit (relativ)"
DT::datatable(dat_SosciSurvey, options = list(pageLength = 5)) 

clean data

table(dat_SosciSurvey$QUESTNNR)

Group_CAM_Pre  Ind_CAM_Post   Ind_CAM_Pre 
           11            34            46 
if(all(unique(CAMfiles_individual[[1]]$participantCAM) %in% dat_SosciSurvey$CASE) &
   all(unique(CAMfiles_group[[1]]$participantCAM) %in% dat_SosciSurvey$CASE)){
 print("all CAM IDs can be found in Soci Survey data set") 
  
  
 tmp_ids <- c(unique(CAMfiles_individual[[1]]$participantCAM),
    unique(CAMfiles_group[[1]]$participantCAM))
 
 
 dat_SosciSurvey <- dat_SosciSurvey[dat_SosciSurvey$CASE %in% tmp_ids, ]
}
[1] "all CAM IDs can be found in Soci Survey data set"
dat_SosciSurvey$QUESTNNR
 [1] "Ind_CAM_Pre"   "Ind_CAM_Pre"   "Ind_CAM_Pre"   "Group_CAM_Pre"
 [5] "Ind_CAM_Pre"   "Group_CAM_Pre" "Ind_CAM_Pre"   "Ind_CAM_Pre"  
 [9] "Ind_CAM_Pre"   "Group_CAM_Pre" "Ind_CAM_Pre"   "Ind_CAM_Pre"  
[13] "Ind_CAM_Pre"   "Ind_CAM_Pre"   "Ind_CAM_Pre"   "Group_CAM_Pre"
[17] "Ind_CAM_Pre"   "Ind_CAM_Pre"   "Group_CAM_Pre" "Ind_CAM_Pre"  
[21] "Ind_CAM_Pre"   "Ind_CAM_Pre"   "Group_CAM_Pre" "Ind_CAM_Pre"  
[25] "Ind_CAM_Pre"   "Ind_CAM_Pre"   "Ind_CAM_Pre"   "Group_CAM_Pre"
[29] "Ind_CAM_Pre"   "Ind_CAM_Pre"   "Group_CAM_Pre" "Ind_CAM_Pre"  
[33] "Ind_CAM_Pre"   "Ind_CAM_Pre"   "Group_CAM_Pre" "Ind_CAM_Pre"  
[37] "Ind_CAM_Pre"   "Ind_CAM_Pre"   "Group_CAM_Pre" "Ind_CAM_Pre"  
[41] "Ind_CAM_Pre"   "Ind_CAM_Pre"   "Group_CAM_Pre" "Ind_CAM_Pre"  
[45] "Ind_CAM_Pre"  
head(dat_SosciSurvey)
    CASE SERIAL  REF      QUESTNNR      MODE             STARTED AB01 BD01_01
653  653   <NA> <NA>   Ind_CAM_Pre interview 2024-03-13 11:07:29 <NA>      NA
654  654   <NA> <NA>   Ind_CAM_Pre interview 2024-03-13 11:09:34 <NA>      NA
656  656   <NA> <NA>   Ind_CAM_Pre interview 2024-03-13 11:10:00 <NA>      NA
657  657   <NA> <NA> Group_CAM_Pre interview 2024-03-13 11:10:45 <NA>      NA
662  662   <NA> <NA>   Ind_CAM_Pre interview 2024-03-14 14:27:06 <NA>      NA
663  663   <NA> <NA> Group_CAM_Pre interview 2024-03-14 14:28:47 <NA>      NA
    BD02 BD04 BD04_02 BD06 BD07 GE01_01 GE01_02
653 <NA> <NA>    <NA> <NA> <NA>       2       4
654 <NA> <NA>    <NA> <NA> <NA>       2       5
656 <NA> <NA>    <NA> <NA> <NA>       3       6
657 <NA> <NA>    <NA> <NA> <NA>      NA      NA
662 <NA> <NA>    <NA> <NA> <NA>       3       6
663 <NA> <NA>    <NA> <NA> <NA>      NA      NA
                                                                                                                                                            GE02
653                  weiteres Wirtschaftswachstum verfolgen. Es gibt viele Wege, um Wirtschaftswachstum mit umweltbezogener Nachhaltigkeit kompatibel zu machen.
654                  weiteres Wirtschaftswachstum verfolgen. Es gibt viele Wege, um Wirtschaftswachstum mit umweltbezogener Nachhaltigkeit kompatibel zu machen.
656 aufhören, Wirtschaftswachstum zu verfolgen. Produktion und Konsum müssen auf gerechte Weise reduziert werden, um umweltbezogene Nachhaltigkeit zu erreichen.
657                                                                                                                                                         <NA>
662 aufhören, Wirtschaftswachstum zu verfolgen. Produktion und Konsum müssen auf gerechte Weise reduziert werden, um umweltbezogene Nachhaltigkeit zu erreichen.
663                                                                                                                                                         <NA>
    GP_01 GP_02 GP_03 GP_04 GP_05 GP_06 GP_07 GP_08 GP_09 GP_10 GP_11 GP_12
653     3     5     2     2     4     5     6     1     3     4     4     7
654     4     4     4     4     4     5     5     5     5     6     5     5
656     6     5     5     6     6     7     5     4     4     7     5     5
657    NA    NA    NA    NA    NA    NA    NA    NA    NA    NA    NA    NA
662     5     3     4     6     5     5     4     3     6     6     5     7
663    NA    NA    NA    NA    NA    NA    NA    NA    NA    NA    NA    NA
    GP_13 GP_14 GP_15 GP_16
653     3     6     5     6
654     5     6     5     5
656     4     5     6     4
657    NA    NA    NA    NA
662     5     5     6     4
663    NA    NA    NA    NA
                                                                                                                                                            IC01
653 Hiermit versichere ich, dass ich die oben beschriebenen Teilnahmeinformationen verstanden habe und mit den genannten Teilnahmebedingungen einverstanden bin.
654 Hiermit versichere ich, dass ich die oben beschriebenen Teilnahmeinformationen verstanden habe und mit den genannten Teilnahmebedingungen einverstanden bin.
656 Hiermit versichere ich, dass ich die oben beschriebenen Teilnahmeinformationen verstanden habe und mit den genannten Teilnahmebedingungen einverstanden bin.
657                                                                                                                                                         <NA>
662 Hiermit versichere ich, dass ich die oben beschriebenen Teilnahmeinformationen verstanden habe und mit den genannten Teilnahmebedingungen einverstanden bin.
663                                                                                                                                                         <NA>
    PC01_05 PC01_08 PC01_09 PC01_12 PC01_06 PC01_11 PC01_04 PC01_03 PC06_01
653      NA      NA      NA      NA      NA      NA      NA      NA      NA
654      NA      NA      NA      NA      NA      NA      NA      NA      NA
656      NA      NA      NA      NA      NA      NA      NA      NA      NA
657      NA      NA      NA      NA      NA      NA      NA      NA      NA
662      NA      NA      NA      NA      NA      NA      NA      NA      NA
663      NA      NA      NA      NA      NA      NA      NA      NA      NA
    PC06_02 PC02_01 PC02_02 PC02_03 PC02_08 PC02_04 PC02_05 PC02_06 PC02_07
653      NA      NA      NA      NA      NA      NA      NA      NA      NA
654      NA      NA      NA      NA      NA      NA      NA      NA      NA
656      NA      NA      NA      NA      NA      NA      NA      NA      NA
657      NA      NA      NA      NA      NA      NA      NA      NA      NA
662      NA      NA      NA      NA      NA      NA      NA      NA      NA
663      NA      NA      NA      NA      NA      NA      NA      NA      NA
    PC03_01 PC03_02 PC03_04 PC03_06 PC03_07 PC03_08 PC03_09 PC03_10 PC03_11
653      NA      NA      NA      NA      NA      NA      NA      NA      NA
654      NA      NA      NA      NA      NA      NA      NA      NA      NA
656      NA      NA      NA      NA      NA      NA      NA      NA      NA
657      NA      NA      NA      NA      NA      NA      NA      NA      NA
662      NA      NA      NA      NA      NA      NA      NA      NA      NA
663      NA      NA      NA      NA      NA      NA      NA      NA      NA
    PC0_GN PO01 PO01_07 SO01_01 TE01_RV1 TE02 TE02_02 TIME001 TIME002 TIME003
653     NA <NA>    <NA>    <NA>      653 <NA>    <NA>    1793       4       7
654     NA <NA>    <NA>    <NA>      654 <NA>    <NA>    1675       8      10
656     NA <NA>    <NA>    <NA>      656 <NA>    <NA>    1641      12      11
657     NA <NA>    <NA>    <NA>     <NA> <NA>    <NA>    2882      26      NA
662     NA <NA>    <NA>    <NA>      662 <NA>    <NA>    2280      35      14
663     NA <NA>    <NA>    <NA>     <NA> <NA>    <NA>    3579      31      NA
    TIME004 TIME005 TIME006 TIME007 TIME008 TIME009 TIME010 TIME011 TIME012
653      18      95      62      41      10      22      10      15       7
654      12     111      51      59       5       7      10       9       6
656      16      94      63      54       9       6      14      12       5
657      NA      NA      NA      NA      NA      NA      NA      NA      NA
662      21     103      35      59       9      30      22      31       9
663      NA      NA      NA      NA      NA      NA      NA      NA      NA
    TIME013 TIME014 TIME015 TIME016 TIME017 TIME018 TIME019 TIME020 TIME021
653      22       9       4       5       3      14       9       3       6
654       6       5       7       8       2       4       6       3       5
656       9      10       5       2       3      14       7       2       3
657      NA      NA      NA      NA      NA      NA      NA      NA      NA
662      12       7       3       5       4       8       8       1       4
663      NA      NA      NA      NA      NA      NA      NA      NA      NA
    TIME022 TIME023 TIME024 TIME025 TIME026 TIME027 TIME028 TIME029 TIME_SUM
653      10       6       3       6       5       6      12      49     2244
654       6       8       6       7       6       7       7      52     2108
656       6      10       4       5       4       6      12      80     2119
657      NA      NA      NA      NA      NA      NA      NA      NA     2908
662       9       8      10       5       6      11      15      71     2768
663      NA      NA      NA      NA      NA      NA      NA      NA     3610
    MAILSENT            LASTDATA FINISHED Q_VIEWER LASTPAGE MAXPAGE MISSING
653     <NA> 2024-03-13 11:45:05     TRUE    FALSE       29      29       0
654     <NA> 2024-03-13 11:44:42     TRUE    FALSE       29      29       0
656     <NA> 2024-03-13 11:45:19     TRUE    FALSE       29      29       0
657     <NA> 2024-03-13 11:59:13     TRUE    FALSE        2       2      NA
662     <NA> 2024-03-14 15:14:21     TRUE    FALSE       29      29       0
663     <NA> 2024-03-14 15:28:57     TRUE    FALSE        2       2      NA
    MISSREL TIME_RSI
653       0     0.98
654       0     1.14
656       0     1.08
657      NA     0.89
662       0     0.89
663      NA     0.73

save wordlists

setwd("outputs")

## save CAM word lists
# for individual
xlsx::write.xlsx2(x = CAMwordlist_individual, file = "CAMwordlist_individual.xlsx")
# for group
xlsx::write.xlsx2(x = CAMwordlist_group, file = "CAMwordlist_group.xlsx")

set up merged CAM data

setwd("outputs/data CAMs")

### load CAM files
suppressMessages(read_file("CAMdata_IndGroup.txt") %>%
  # ... split it into lines ...
  str_split('\n') %>% first() %>%
    discard(function(x) x == '') %>%
    discard(function(x) x == '\r') %>%
  # ... filter empty rows ...
  discard(function(x) x == '')) -> dat_CAM_indGroup


raw_CAM_indGroup <- list()
for(i in 1:length(dat_CAM_indGroup)){
  raw_CAM_indGroup[[i]] <- jsonlite::fromJSON(txt = dat_CAM_indGroup[[i]])
}
rm(dat_CAM_indGroup)

Create CAM files, draw CAMs and compute network indicators

### create CAM single files (nodes, connectors, merged)
CAMfiles_indGroup <- create_CAMfiles(datCAM = raw_CAM_indGroup, reDeleted = TRUE)
Nodes and connectors, which were deleted by participants were removed. 
 # deleted nodes:  103 
 # deleted connectors:  32
## remove empty concepts:
CAMfiles_indGroup[[1]]$text[nchar(CAMfiles_indGroup[[1]]$text) < 2]
character(0)
# tmp_ids <- CAMfiles_indGroup[[1]]$id[nchar(CAMfiles_indGroup[[1]]$text) < 2]
# table(CAMfiles_indGroup[[1]]$isActive[CAMfiles_indGroup[[1]]$id %in% tmp_ids])
# CAMfiles_indGroup[[1]] <- CAMfiles_indGroup[[1]][!CAMfiles_indGroup[[1]]$id %in% tmp_ids,]

### apply protocol
setwd("outputs CAM-App/02_summarized_CAMdata_IndGroup_word2vec")
text <- readLines("protocol.txt", warn = FALSE)
text <- readLines(textConnection(text, encoding = "UTF-8"), encoding = "UTF-8")

if (testIfJson(file = text)) {
  protocol <-
    rjson::fromJSON(file = "protocol.txt")
  
  ## no CAM deleted
  print(protocol$deletedCAMs)

  tmp_out <- overwriteTextNodes(protocolDat = protocol,
                                nodesDat = CAMfiles_indGroup[[1]])
  CAMfiles_indGroup[[1]] <- tmp_out[[1]]
  # tmp_out[[2]]
  
} else{
  print("Invalid protocol uploaded")
}
list()

time 2024-04-23 11:11:07.803457 at index 1 for approximate matching 
time 2024-04-23 11:11:10.046564 at index 2 for approximate matching 
time 2024-04-23 11:11:16.792532 at index 3 for approximate matching 
time 2024-04-23 11:11:20.44261 at index 4 for approximate matching 
time 2024-04-23 11:11:22.770141 at index 5 for approximate matching 
time 2024-04-23 11:11:28.216995 at index 6 for approximate matching 
time 2024-04-23 11:11:30.96704 at index 7 for approximate matching 
time 2024-04-23 11:11:34.232146 at index 8 for approximate matching 
time 2024-04-23 11:11:38.518702 at index 9 for approximate matching 
time 2024-04-23 11:11:41.921861 at index 10 for approximate matching 
time 2024-04-23 11:11:45.699885 at index 11 for approximate matching 
time 2024-04-23 11:11:51.34494 at index 12 for approximate matching 
time 2024-04-23 11:12:13.3049 at index 13 for approximate matching 
time 2024-04-23 11:12:15.893749 at index 14 for approximate matching 
time 2024-04-23 11:12:20.275445 at index 15 for approximate matching 
time 2024-04-23 11:12:25.353507 at index 16 for approximate matching 
time 2024-04-23 11:12:41.383465 at index 17 for approximate matching 
time 2024-04-23 11:12:47.33484 at index 18 for approximate matching 
time 2024-04-23 11:13:18.634237 at index 19 for approximate matching 
time 2024-04-23 11:13:22.132808 at index 20 for approximate matching 
time 2024-04-23 11:13:45.906254 at index 21 for approximate matching 
time 2024-04-23 11:14:13.042581 at index 22 for approximate matching 
time 2024-04-23 11:14:39.832246 at index 23 for approximate matching 
time 2024-04-23 11:20:36.945985 at index 24 for search terms 
time 2024-04-23 11:21:17.93391 at index 25 for search terms 
time 2024-04-24 07:45:06.789174 at index 26 for word2vec 
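After the protocol has been applied, a minimal sanity check (sketch) is that summarizing only relabels concepts and neither adds nor drops CAMs:

## optional sketch: the number of CAMs should be unchanged by the summarizing protocol
length(unique(CAMfiles_indGroup[[1]]$participantCAM)) == length(raw_CAM_indGroup)
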
### draw CAMs
CAMdrawn_indGroup <- draw_CAM(dat_merged = CAMfiles_indGroup[[3]],
                     dat_nodes = CAMfiles_indGroup[[1]],ids_CAMs = "all",
                     plot_CAM = FALSE,
                     useCoordinates = TRUE,
                     relvertexsize = 3,
                     reledgesize = 1)
processing 45 CAMs... 
[1] "== participantCAM in drawnCAM"
## check for implausible (very short) concept labels
for(i in 1:length(CAMdrawn_indGroup)){
  if(any(nchar(V(CAMdrawn_indGroup[[i]])$label) < 3)){
    print(V(CAMdrawn_indGroup[[i]])$label)
  }
}

### network indicators
tmp_microIndicator <- c("Wirtschaftswachstum", "Ökologische Nachhaltigkeit") # pre-defined concepts
networkIndicators_indGroup <- compute_indicatorsCAM(drawn_CAM = CAMdrawn_indGroup, 
                                           micro_degree = tmp_microIndicator, 
                                           micro_valence = tmp_microIndicator, 
                                           micro_centr_clo = tmp_microIndicator, 
                                           micro_transitivity = tmp_microIndicator, 
                                           largestClique = FALSE)


## check for CAMs in which no valence was changed
tmp_ids <- networkIndicators_indGroup$CAM_ID[is.na(networkIndicators_indGroup$assortativity_valence_macro)]
for(i in tmp_ids){
    plot(CAMdrawn_indGroup[[i]], edge.arrow.size = .7,
       layout=layout_nicely, vertex.frame.color="black", asp = .5, margin = -0.1,
       vertex.size = 10, vertex.label.cex = .9)
}


### wordlist
CAMwordlist_indGroup <- create_wordlist(
  dat_nodes =  CAMfiles_indGroup[[1]],
  dat_merged =  CAMfiles_indGroup[[3]],
  useSummarized = TRUE,
  order = "frequency",
  splitByValence = FALSE,
  comments = TRUE,
  raterSubsetWords = NULL,
  rater = FALSE
)
[1] "create_wordlist - use summarized words"
[1] 134
[1] 636
[1] "temporarily suffixes are added, because not all words have been summarized"
processing 45 CAMs... 
[1] "== participantCAM in drawnCAM"
DT::datatable(CAMwordlist_indGroup, options = list(pageLength = 5)) 

save final files

setwd("outputs")

## save SoSci Survey data
xlsx::write.xlsx2(x = dat_SosciSurvey, file = "dat_SosciSurvey.xlsx")
write.csv2(x = dat_SosciSurvey, file = "dat_SosciSurvey.csv")
saveRDS(object = dat_SosciSurvey, file = "dat_SosciSurvey.rds")


## save CAM word lists
# for individual and group
xlsx::write.xlsx2(x = CAMwordlist_indGroup, file = "CAMwordlist_indGroup.xlsx")
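
If the merged network indicators computed above are needed in later analysis scripts, they could be stored here as well (a sketch, mirroring the formats used for the SoSci Survey data):

## optional sketch: also persist the merged network indicators
# saveRDS(object = networkIndicators_indGroup, file = "networkIndicators_indGroup.rds")
# xlsx::write.xlsx2(x = networkIndicators_indGroup, file = "networkIndicators_indGroup.xlsx")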