## # A tibble: 5 x 2
## Teacher `Average Grade`
## <fct> <dbl>
## 1 Alan 52.1
## 2 Brindley 66.2
## 3 Devin 60.7
## 4 Madelyn 67.2
## 5 Suzana 63.2
Generally, the first thing we want to look at in one of these analyses is the overall distribution of grades, which we can see in the histogram below.
# Histogram of the overall grade distribution
ggplot(data=PresTotals, aes(x=Percentage)) +
geom_histogram(aes(y=..density..), alpha = 0.6, position = "identity") +
labs(x="Grade (Percentage)", y="Density") +
stat_function(fun=dnorm, args = list(mean=mean(PresTotals$Percentage), sd=sd(PresTotals$Percentage)),
color="black", size = 1.4) +
scale_fill_manual(values= c("darkred", "darkgrey")) +
scale_x_continuous(limits = c(0,100)) +
theme_alan() +
ggtitle("Histogram of Grades in AS Global Perspectives Presentations")
ggsave(here("Presentation Histogram.png"), plot = last_plot(), device = NULL, path = NULL,
width = 8, height = 4, units = c("in", "cm", "mm"),
dpi = 600)
Overall performance was around what we expected with an average score of 62.58% and a standard deviation of 15.6% (Range = 20% to 94%).
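(For reference, those numbers come from something like the minimal sketch below; it assumes PresTotals$Percentage holds each student's overall score out of 100, which is how it is used in the plots.)
# Sketch of the summary statistics quoted above
# (assumes PresTotals$Percentage is each student's overall score out of 100)
mean(PresTotals$Percentage)   # reported above as 62.58
sd(PresTotals$Percentage)     # reported above as 15.6
range(PresTotals$Percentage)  # reported above as 20 to 94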
Let's see how the grades fell on a by-teacher basis.
PresSummaryTeacher <- summarySE(PresTotals, measurevar = "Percentage", groupvars = c("Teacher"))
ggplot(data=PresSummaryTeacher, aes(x=Teacher, y= Percentage, fill = Teacher)) +
geom_bar(stat= "summary", position = pd, width = 0.8) +
geom_errorbar(aes(ymin= Percentage - se, ymax= Percentage + se), width= 0.2, size = 1, position= pd)+
#annotate("text", x=11.5, y=7.2, label = "Average Grades", hjust = 0, fontface = "bold") +
#annotate("text", x=11.5, y=6.7, label = "Alan's Class = 52.3%", hjust = 0, color = "darkred") +
#annotate("text", x=11.5, y=6.2, label = "June's Class = 67.2%", hjust = 0, color = "darkgrey") +
labs(x="Teacher", y="Average Grade (Percentage)") +
theme_alan() +
ggtitle("Bar Plot of Grades (By Teacher) on AS Global Perspectives Presentations")
ggsave(here("Presentation Grades by Teacher.png"), plot = last_plot(), device = NULL, path = NULL,
width = 8, height = 4, units = c("in", "cm", "mm"),
dpi = 600)
Overall the classes were reasonably similar to each other, although mine did worst overall. It's possible this is random, but who knows. I operated under the assumption that students should not be given direct feedback on early versions of their presentations, so it's perhaps unsurprising that my students, who only had a single round of more directed feedback quite late in the process, performed the worst.
PresSummaryMarker <- summarySE(PresTotals, measurevar = "Percentage", groupvars = c("Marker"))
ggplot(data=PresSummaryMarker, aes(x=Marker, y= Percentage, fill = Marker)) +
geom_bar(stat= "summary", position = pd, width = 0.8) +
geom_errorbar(aes(ymin= Percentage - se, ymax= Percentage + se), width= 0.2, size = 1, position= pd)+
#annotate("text", x=11.5, y=7.2, label = "Average Grades", hjust = 0, fontface = "bold") +
#annotate("text", x=11.5, y=6.7, label = "Alan's Class = 52.3%", hjust = 0, color = "darkred") +
#annotate("text", x=11.5, y=6.2, label = "June's Class = 67.2%", hjust = 0, color = "darkgrey") +
labs(x="Question", y="Average Grade (Percentage)") +
theme_alan() +
ggtitle("Bar Plot of Grades (By Marker) on AS Global Perspectives Presentations")
ggsave(here("Presentation Grades by Marker.png"), plot = last_plot(), device = NULL, path = NULL,
width = 8, height = 4, units = c("in", "cm", "mm"),
dpi = 600)
Overall, grading appears to be fairly even, although it is probably worth noting that the grades Madelyn gave are about 10% higher than average.
PresTotals %<>%
mutate(OwnStudent = ifelse(Teacher == Marker, "Own Student", "Other Student"))
PresSummaryOwnStudent <- summarySE(PresTotals, measurevar = "Percentage", groupvars = c("OwnStudent", "Marker"))
ggplot(data=PresSummaryOwnStudent, aes(x=Marker, y= Percentage, fill = OwnStudent)) +
geom_bar(stat= "summary", position = pd, width = 0.8) +
geom_errorbar(aes(ymin= Percentage - se, ymax= Percentage + se), width= 0.2, size = 1, position= pd)+
#annotate("text", x=11.5, y=7.2, label = "Average Grades", hjust = 0, fontface = "bold") +
#annotate("text", x=11.5, y=6.7, label = "Alan's Class = 52.3%", hjust = 0, color = "darkred") +
#annotate("text", x=11.5, y=6.2, label = "June's Class = 67.2%", hjust = 0, color = "darkgrey") +
labs(x="Marker", y="Average Grade (Percentage)") +
theme_alan() +
ggtitle("Bar Plot of Grades (By Student) on AS Global Perspectives Presentation")
ggsave(here("Presentation Grades by Own Student.png"), plot = last_plot(), device = NULL, path = NULL,
width = 8, height = 4, units = c("in", "cm", "mm"),
dpi = 600)
It's again hard to know if there is anything worth remarking on here - broadly, people were pretty even in their grading of their own students versus other students. We can see the same overall pattern where Madelyn's grades are a bit higher, and the grades of her own students higher still. This might reflect some self-bias, but may also just reflect the luck of the draw in which of her own students she graded.
PresGrades$GradeF <- factor(PresGrades$Grade)
PresGrades2 <- subset(PresGrades, Criteria != "Total")
Plot1 <-
ggplot(data=PresGrades2, aes(x=GradeF, fill = Teacher)) +
geom_histogram(stat = "count", position = pd, width = 0.8) +
labs(x="Score", y="Count") +
theme_alan() +
facet_wrap(Teacher ~ Criteria, ncol = 5)+
ggtitle("AS Global Perspectives - Histograms of Grades by Teacher and Criteria")
ggsave(here("Presentation Histograms by Teacher and Criteria.png"), plot = last_plot(), device = NULL, path = NULL,
width = 16, height = 10, units = c("in", "cm", "mm"),
dpi = 600)
Again, we probably can't see much surprising here - Madelyn's students did best because they were more likely to score 4s and 5s.
From my own perspective, I can see that my students did very poorly on the conclusion criteria: they almost all scored a 1 or a 2, and I recognize that I was not insistent enough in making clear that they needed to provide evidence for their solutions. Instead I probably focused too much on teaching them to have a well-reasoned solution, which ended up being beyond most of their capabilities.
Plot2 <-
ggplot(data=PresGrades2, aes(x=GradeF, fill = Marker)) +
geom_histogram(stat = "count", position = pd, width = 0.8) +
labs(x="Score", y="Count") +
theme_alan() +
facet_wrap(Marker ~ Criteria, ncol = 5)+
ggtitle("AS Global Perspectives - Histograms of Grades by Marker and Criteria")
ggsave(here("Presentation Histograms by Marker and Criteria.png"), plot = Plot2, device = NULL, path = NULL,
width = 16, height = 10, units = c("in", "cm", "mm"),
dpi = 600)
We can also look at which grades we each gave out most commonly. Again, I'm not certain what we should take from this, as we all only worked with relatively small samples of papers, but perhaps some of you might spot things about your own practice here. There is again some clumpiness for some of us: I and some others gave out a lot of 2s on conclusions and proposed solutions, whereas Suzana and Madelyn (and to a lesser extent Devin) seem to have been noticeably more lenient in that area, with a mean somewhere around 3.
Similarly, Madelyn's grades clumped on Differentiation, where she gave mostly 4s and a few 3s (and nothing lower than a 3).
I cannot tell anyone what to make of these numbers, other than that perhaps we can all reflect on them to some limited extent.
PresSummaryTeacherPres <-
summarySE(PresTotals, measurevar = "Presentation", groupvars = c("Teacher")) %>%
mutate(Part = "Presentation") %>%
subset(select = c(Teacher, Presentation, se, Part)) %>%
mutate(Percentage = Presentation/25*100) %>%
mutate(se = se/25*100) %>% # rescale the standard error to the same percentage scale as the means
setNames(c("Teacher", "Score", "se", "Part", "Percentage"))
PresSummaryTeacher2 <-
summarySE(PresTotals, measurevar = "Reflection", groupvars = c("Teacher")) %>%
mutate(Part = "Reflection") %>%
subset(select = c(Teacher, Reflection, se, Part)) %>%
mutate(Percentage = Reflection/10*100) %>%
mutate(se = se/10*100) %>% # rescale the reflection standard error to percentages as well
setNames(c("Teacher", "Score", "se", "Part", "Percentage")) %>%
rbind.data.frame(PresSummaryTeacherPres)
ggplot(data=PresSummaryTeacher2, aes(x=Teacher, y= Percentage, fill = Teacher)) +
geom_bar(stat= "summary", position = pd, width = 0.8) +
geom_errorbar(aes(ymin= Percentage - se, ymax= Percentage + se), width= 0.2, size = 1, position= pd)+
#annotate("text", x=11.5, y=7.2, label = "Average Grades", hjust = 0, fontface = "bold") +
#annotate("text", x=11.5, y=6.7, label = "Alan's Class = 52.3%", hjust = 0, color = "darkred") +
#annotate("text", x=11.5, y=6.2, label = "June's Class = 67.2%", hjust = 0, color = "darkgrey") +
labs(x="Teacher", y="Average Grade (Percentage)") +
theme_alan() +
facet_wrap(~Part, ncol = 1) +
ggtitle("Bar Plot of Grades (By Teacher) on AS Global Perspectives Presentations")
ggsave(here("Presentation Grades by Teacher.png"), plot = last_plot(), device = NULL, path = NULL,
width = 8, height = 4, units = c("in", "cm", "mm"),
dpi = 600)
PresTotalsPres <-
subset(PresTotals, select = c(Teacher, Student, Presentation)) %>%
mutate(Percentage = Presentation / 25 *100) %>%
subset(select = -Presentation) %>%
mutate(Part = "Presentation")
PresTotals2 <-
subset(PresTotals, select = c(Teacher, Student, Reflection)) %>%
mutate(Percentage = Reflection / 10 *100) %>%
subset(select = -Reflection) %>%
mutate(Part = "Reflection") %>%
rbind.data.frame(PresTotalsPres)
PresTotals2$PercentageF <- factor(PresTotals2$Percentage)
Plot3 <-
ggplot(data=PresTotals2, aes(x=PercentageF, fill = Teacher)) +
geom_histogram(stat = "count", position = pd, width = 0.8) +
labs(x="Score", y="Count") +
theme_alan() +
facet_grid(Teacher ~ Part) +
theme(legend.position = "none") +
ggtitle("AS Global Perspectives - Histograms of Grades by Teacher and Component")
ggsave(here("Presentation Histograms by Teacher and Component.png"), plot = Plot3, device = NULL, path = NULL,
width = 16, height = 10, units = c("in", "cm", "mm"),
dpi = 600)
So there we see at least part of the difference - I had a total of four students (a quarter of my class) not hand in a reflective essay at all. Brindley also appears to have two who didn't hand one in, although I'm more inclined to think he simply didn't enter those grades into the sheet.
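If we wanted to check that directly rather than eyeballing the histograms, a quick count per class would do it. This is only a sketch: it assumes un-submitted reflections show up as NA or 0 in the Reflection column, and MissingReflections is just an illustrative column name.
# Rough count of missing reflective essays per class (sketch)
# (assumes un-submitted reflections appear as NA or 0 in the Reflection column)
PresTotals %>%
group_by(Teacher) %>%
summarise(MissingReflections = sum(is.na(Reflection) | Reflection == 0))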
There are many ways we could assign grade boundaries, depending on how generous we are feeling. The simplest would be to use the same grade boundaries the school usually uses: an A* is 90%, an A is 80%, a B is 70%, and so on. If we do that, let's take a look at where the grades would fall.
AStars1 <- subset(PresTotals, Percentage > 89.99)
AStars1$Boundary1 <- "A*"
As1 <- subset(PresTotals, Percentage < 89.99 & Percentage > 79.99)
As1$Boundary1 <- "A"
Bs1 <- subset(PresTotals, Percentage < 79.99 & Percentage > 69.99)
Bs1$Boundary1 <- "B"
Cs1 <- subset(PresTotals, Percentage < 69.99 & Percentage > 59.99)
Cs1$Boundary1 <- "C"
Ds1 <- subset(PresTotals, Percentage < 59.99 & Percentage > 49.99)
Ds1$Boundary1 <- "D"
Es1 <- subset(PresTotals, Percentage < 49.99 & Percentage > 39.99)
Es1$Boundary1 <- "E"
Us1 <- subset(PresTotals, Percentage < 39.99)
Us1$Boundary1 <- "U"
PresTotals2 <- rbind.data.frame(AStars1, As1, Bs1, Cs1, Ds1, Es1, Us1)
PresTotals2$Boundary1 <- factor(PresTotals2$Boundary1, levels = c("U", "E", "D", "C", "B", "A", "A*"))
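(As an aside, the repeated subset() calls above could be collapsed into a single cut() call. The sketch below should give the same Boundary1 assignments, up to row order; PresTotals2_alt is just an illustrative name, and it assumes no score lands exactly on one of the .99 cut points.)
# Sketch: assign the school's letter boundaries in one step with cut()
# (equivalent to the subsets above, up to row order; PresTotals2_alt is an illustrative name)
PresTotals2_alt <- PresTotals
PresTotals2_alt$Boundary1 <- cut(PresTotals2_alt$Percentage,
breaks = c(-Inf, 39.99, 49.99, 59.99, 69.99, 79.99, 89.99, Inf),
labels = c("U", "E", "D", "C", "B", "A", "A*"))
Because cut() returns its labels as factor levels in that order, the separate factor() call would not be needed either.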
ggplot(data=PresTotals2, aes(x=Boundary1)) +
geom_histogram(stat = "count", position = pd, width = 0.8) +
labs(x="Score", y="Count") +
theme_alan() +
ggtitle("AS Global Perspectives - Grade Boundaries - Presentations")
ggsave(here("AS Global Perspectives- Histogram of Grade Boundaries - Presentations.png"), plot = last_plot(), device = NULL, path = NULL,
width = 8, height = 6, units = c("in", "cm", "mm"),
dpi = 600)
First things first: we can see that the overall grades are too low if we assign grade boundaries in this way, which again means that we need to curve the grades - currently the average sits around a C, with over 40 students at a D or below.
ggplot(data=PresTotals2, aes(x=Boundary1, fill = Teacher)) +
geom_histogram(stat = "count", position = pd, width = 0.8) +
labs(x="Score", y="Count") +
theme_alan() +
facet_wrap( ~ Teacher, ncol = 1)+
ggtitle("AS Global Perspectives - Grade Boundaries by Teacher")
ggsave(here("AS Global Perspectives- Histogram of Grade Boundaries by Teacher.png"), plot = last_plot(), device = NULL, path = NULL,
width = 8, height = 6, units = c("in", "cm", "mm"),
dpi = 600)
We can also see the differences between the classes even more starkly here. Only Madelyn and Suzana have A* students on this assignment, and the majority of my class is at a D or below. So clearly we need to curve some grades.
#2020
GP2019 <- read.csv(here("AGPGrades2019.csv"))
GP2019$MTLetter <- factor(GP2019$MTLetter, levels = c("U", "E", "D", "C", "B", "A"))
GP2019Cut <-
GP2019 %>%
mutate(Year = "2019") %>%
subset(select = c(`DC....`, Year, PresPerc2)) %>%
setNames(c("Student", "Year", "Score"))
GP2020Cut <-
PresTotals2 %>%
mutate(Year = "2020") %>%
subset(select = c(Student, Year, Percentage)) %>%
setNames(c("Student", "Year", "Score"))
GPTotals <-
rbind.data.frame(GP2019Cut, GP2020Cut) %>%
mutate(Score = as.numeric(as.character(Score))) %>%
mutate(Score = replace_na(Score, 0)) %>%
subset(Score != 0)
ggplot(data=GPTotals, aes(x=Score)) +
geom_histogram(stat = "count", position = pd, width = 0.8) +
labs(x="Score", y="Count") +
facet_wrap(~ Year, ncol = 1) +
theme_alan() +
ggtitle("AS Global Perspectives - Distribution of Raw Scores and Letter Grades")
We also appear to have been quite a bit harder on students for their presentations this year than we were last year, which is the opposite of what we saw for the Midterm. Not super relevant for now, but something to think about when standardising grading in the future.
Let's see how well simply applying a “standard” curve to the grades works, by turning them into a normal distribution.
library(ProfessR)
# Use the boxplot five-number summary of the scores to pick grade divisions,
# then let do.grades() assign letter grades around those divisions
B = boxplot(PresTotals2$Percentage, plot = FALSE)
divs = c(min(PresTotals2$Percentage), B$stats[1:4] + diff(B$stats)/2, max(PresTotals2$Percentage))
D1 = do.grades(PresTotals2$Percentage, divs = divs, tit = "AS GP Presentations")
## Grade divisions:
## 20
## 35.5
## 57
## 68.5
## 84
## 94
## Letter Grade Distribution:
## 1 A+ 3
## 2 A 4
## 3 A- 4
## 4 B+ 7
## 5 B 10
## 6 B- 11
## 7 C+ 5
## 8 C 13
## 9 C- 16
## 10 D+ 14
## 11 D 10
## 12 D- 3
## 13 F 6
## [1] "Mean Score= 74.9110327807836"
So this gives us some idea of what a normal distribution around these grades would look like, but it arguably still leaves too many students with a D or less, and of course it doesn't include grades of U, which we also need.
count(GP2020Cut$Score) %>%
knitr::kable(caption = "AS GP Grade Counts", "html", row.names = F) %>%
kable_styling(full_width = F)
| x | freq |
|---|---|
| 20 | 1 |
| 26 | 1 |
| 29 | 1 |
| 31 | 1 |
| 34 | 2 |
| 37 | 1 |
| 40 | 2 |
| 43 | 3 |
| 46 | 1 |
| 49 | 6 |
| 51 | 9 |
| 54 | 5 |
| 57 | 11 |
| 60 | 5 |
| 63 | 13 |
| 66 | 5 |
| 69 | 5 |
| 71 | 6 |
| 74 | 7 |
| 77 | 3 |
| 80 | 4 |
| 83 | 3 |
| 86 | 4 |
| 89 | 4 |
| 91 | 1 |
| 94 | 2 |
So there you can see the counts of actual grades, which we can use to try to set boundaries as we have done before.
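One thing that makes the boundary-picking below easier is a running total of how many students sit at or below each raw score; a minimal sketch (CumulativeStudents is just an illustrative column name):
# Sketch: cumulative count of students at or below each raw score,
# handy for seeing how many students each candidate boundary would capture
GP2020Cut %>%
dplyr::count(Score) %>%
dplyr::mutate(CumulativeStudents = cumsum(n))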
First we need to set a U boundary. If we stuck with the school's system without bumping, this would give us 7 students with a grade of U - more than the 3 we gave a U to on the midterm. So let's put the boundary for a U at 30 percent or less, which again gives us 3 students with a U.
For the midterm we gave 5 students an E grade, so let's give 6 students an E grade for the presentation - meaning scores between 31% and 40% get an E.
On the midterm we gave 8 A* grades - equal to the number of students to whom we gave an E or a U. If we did the same here, we would need to give an A* to 9 students (3 Us, 6 Es). Because of where our students fall, this means giving an A* to either 7 students (everyone at 89% or above) or 11 students (everyone at 86% or above). Because I think our overall grades are still quite low, I'm going to err on the charitable side and award 11 A*s.
An A would normally be 80-90%, but we've moved the A* boundary down, so there are only 7 students left in the 80s. To get a similar number of A grades, let's shift the A boundary down to 74%, which gives us 17 A students (we gave 15 As on the Midterm).
We gave 11 Ds on the midterm under the assumption that we wanted fewer Ds than As, which is still true. So let's only give a D for scores of 49% or below (10 students), because if we move the boundary up one more level we would have to give 19 Ds.
This leaves us with 59 students between B and C. We want the numbers of these two to be about even, maybe slightly skewed towards C (because we gave out more A/A* grades than lower grades). But because of the way the grades fall, we can actually split this almost exactly in half.
We give 29 Bs for scores between 63% and 71%, and 30 Cs for scores between 50% and 60%.
So we end up with the following grade divisions (number of students in brackets):
- A*: above 85 (11)
- A: 74 - 85 (17)
- B: 63 - 71 (29)
- C: 50 - 60 (30)
- D: 41 - 49 (10)
- E: 31 - 40 (6)
- U: 30 or below (3)
Now we just need to assign these grades, which I do below.
#A Stars - 11 students between 86 and 94
AStars1 <- subset(PresTotals2, Percentage > 85)
AStars1$Boundary2 <- "A*"
AStars1$PercentageCurve <- 90 + (AStars1$Percentage - 86)
#As - 17 students between 74 and 83
As1 <- subset(PresTotals2, Percentage < 84 & Percentage > 73)
As1$Boundary2 <- "A"
As1$PercentageCurve <- (As1$Percentage - 74) + 80
#Bs - 29 students between 63 and 71
Bs1 <- subset(PresTotals2, Percentage < 72 & Percentage > 62)
Bs1$Boundary2 <- "B"
Bs1$PercentageCurve <- (Bs1$Percentage - 63) + 70
#Cs - 30 students between 50 and 60
Cs1 <- subset(PresTotals2, Percentage < 61 & Percentage > 49)
Cs1$Boundary2 <- "C"
Cs1$PercentageCurve <- (Cs1$Percentage - 50) + 59
#Ds- 10 students between 41 and 49
Ds1 <- subset(PresTotals2, Percentage < 50 & Percentage > 40)
Ds1$Boundary2 <- "D"
Ds1$PercentageCurve <- (Ds1$Percentage - 41) + 48
#Es- 6 students between 31 and 40
Es1 <- subset(PresTotals2, Percentage < 41 & Percentage > 30)
Es1$Boundary2 <- "E"
Es1$PercentageCurve <- (Es1$Percentage - 31) + 40
#Us - 3 Students below 30% (all set to 30% to not allow this one project to completely tank their grades)
Us1 <- subset(PresTotals2, Percentage < 31)
Us1$Boundary2 <- "U"
Us1$PercentageCurve <- 30
PresTotals3 <-
rbind.data.frame(AStars1, As1, Bs1, Cs1, Ds1, Es1, Us1) %>%
arrange(Student)
FinalGrades <-
PresTotals2 %>%
arrange(Student) %>%
mutate(`Percentage (Curved)` = PresTotals3$PercentageCurve) %>%
mutate(Boundary2 = PresTotals3$Boundary2) %>%
subset(select = c(Student, Teacher, Presentation, Reflection, Percentage, `Percentage (Curved)`, Boundary2)) %>%
setNames(c("Student", "Teacher", "Presentation", "Reflection", "Percentage", "Percentage (Curved)", "Letter Grade")) %>%
mutate(`Letter Grade` = factor(`Letter Grade`, levels = c("U", "E", "D", "C", "B", "A", "A*"))) %>%
arrange(-Percentage)
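(The step above relies on PresTotals2 and PresTotals3 containing the same students in the same order after arrange(Student). A join would make the same merge without depending on row order; this is only a sketch, it assumes Student uniquely identifies each row, and FinalGradesAlt is just an illustrative name.)
# Sketch: merge the curved grades back in by Student rather than by row position
# (assumes Student is a unique identifier; FinalGradesAlt is an illustrative name)
FinalGradesAlt <-
PresTotals2 %>%
dplyr::left_join(subset(PresTotals3, select = c(Student, PercentageCurve, Boundary2)),
by = "Student")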
head(FinalGrades, 10)
## Student Teacher Presentation Reflection Percentage
## 1 KyleJr. Cheng Suzana 23 10 94
## 2 Lemon Zhang Suzana 24 9 94
## 3 Jessica Guo Madelyn 22 10 91
## 4 Esther Zhang Madelyn 23 8 89
## 5 Justin Pang Brindley 21 10 89
## 6 Paulin Zhang Madelyn 22 9 89
## 7 Sophia Xie Brindley 23 8 89
## 8 Ella Liu Devin 22 8 86
## 9 Jane Wang Madelyn 22 8 86
## 10 Jolin Ren Suzana 22 8 86
## Percentage (Curved) Letter Grade
## 1 98 A*
## 2 98 A*
## 3 95 A*
## 4 93 A*
## 5 93 A*
## 6 93 A*
## 7 93 A*
## 8 90 A*
## 9 90 A*
## 10 90 A*
Now let's look at the distribution of the curved grades.
ggplot(data=FinalGrades, aes(x=`Percentage (Curved)` , fill = `Letter Grade`)) +
geom_histogram(stat = "count", position = pd, width = 0.8) +
labs(x="Score", y="Count") +
theme_alan() +
ggtitle("AS Global Perspectives - Revised Distribution of Letter Grades")
Woohoo - looks pretty good. Let's output this as some spreadsheets.
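A minimal sketch of that export is below; it writes one overall CSV plus one per teacher, and the file names are just placeholders.
# Sketch: write the final grades out as CSVs - one overall file and one per teacher
# (file names are placeholders)
write.csv(FinalGrades, here("AS GP Presentation Final Grades.csv"), row.names = FALSE)
for (t in unique(as.character(FinalGrades$Teacher))) {
write.csv(subset(FinalGrades, Teacher == t),
here(paste0("AS GP Presentation Final Grades - ", t, ".csv")),
row.names = FALSE)
}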
If you have any questions or comments about student performance in the class, please don’t hesitate to get in touch via email to Alan Nielsen.
This report was generated using R Markdown.