## # A tibble: 5 x 2
##   Teacher  `Average Grade`
##   <chr>              <dbl>
## 1 Alan                53.6
## 2 Brindley            65.5
## 3 Devin               59.4
## 4 Madelyn             69.4
## 5 Suzana              63.2

Basics of AS Global Perspectives Presentations

This report looks at how students did on their AS Global Perspectives presentations: the overall distribution of grades, grades broken down by teacher and by marker, how we each graded our own students versus other teachers' students, score distributions by criterion, and what the school's usual grade boundaries would mean for these scores.

Overall Performance

Generally, the first thing we want to look at in one of these analyses is the overall distribution of grades, which we can see in the histogram below.

# Histogram of overall grades, with a normal curve overlaid for reference
ggplot(data = PresTotals, aes(x = Total)) +
  geom_histogram(aes(y = after_stat(density)), alpha = 0.6, position = "identity") +
  stat_function(fun = dnorm,
                args = list(mean = mean(PresTotals$Total), sd = sd(PresTotals$Total)),
                color = "black", size = 1.4) +
  labs(x = "Grade (Percentage)", y = "Density") +
  scale_x_continuous(limits = c(0, 100)) +
  theme_alan() +
  ggtitle("Histogram of Grades in AS Global Perspectives Presentations")

ggsave(here("Presentation Histogram.png"), plot = last_plot(), device = NULL, path = NULL,
       width = 8, height = 4, units = c("in", "cm", "mm"),
       dpi = 600)

Overall performance was around what we expected, with an average score of 62.92% and a standard deviation of 16.3 (range = 20% to 96%).
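
As a sanity check, these figures can be pulled straight from the totals (assuming PresTotals$Total holds each student's percentage score):

mean(PresTotals$Total)    # average grade (about 62.9)
sd(PresTotals$Total)      # standard deviation (about 16.3)
range(PresTotals$Total)   # lowest and highest grades (20 and 96)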

Grades by Teacher

Let's see how the grades fell on a by-teacher basis.

PresSummaryTeacher <- summarySE(PresTotals, measurevar = "Total", groupvars = c("Teacher"))

# PresSummaryTeacher already contains per-teacher means and standard errors, so plot those values directly
ggplot(data = PresSummaryTeacher, aes(x = Teacher, y = Total, fill = Teacher)) +
  geom_col(position = pd, width = 0.8) +
  geom_errorbar(aes(ymin = Total - se, ymax = Total + se), width = 0.2, size = 1, position = pd) +
  labs(x = "Teacher", y = "Average Grade (Percentage)") +
  theme_alan() +
  ggtitle("Bar Plot of Grades (By Teacher) on AS Global Perspectives Presentations")

ggsave(here("Presentation Grades by Teacher.png"), plot = last_plot(), device = NULL, path = NULL,
       width = 8, height = 4, units = c("in", "cm", "mm"),
       dpi = 600)

Overall the classes were reasonably similar to each other, although mine was the worst overall. It's possible this is random, but who knows. I operated under the assumption that students should not be given direct feedback on early versions of their presentations, so it's perhaps unsurprising that my students, who only had a single round of more directed feedback quite late in the process, performed the worst.
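
If we want a rough sense of whether those between-class differences are bigger than chance alone would produce, a one-way ANOVA on the raw totals is a reasonable first pass (a minimal sketch, assuming PresTotals has one row per student):

# Does average grade differ by teacher more than we'd expect by chance?
teacher_aov <- aov(Total ~ Teacher, data = PresTotals)
summary(teacher_aov)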

Grades by Marker

PresSummaryMarker <- summarySE(PresTotals, measurevar = "Total", groupvars = c("Marker"))

# PresSummaryMarker already contains per-marker means and standard errors
ggplot(data = PresSummaryMarker, aes(x = Marker, y = Total, fill = Marker)) +
  geom_col(position = pd, width = 0.8) +
  geom_errorbar(aes(ymin = Total - se, ymax = Total + se), width = 0.2, size = 1, position = pd) +
  labs(x = "Marker", y = "Average Grade (Percentage)") +
  theme_alan() +
  ggtitle("Bar Plot of Grades (By Marker) on AS Global Perspectives Presentations")

ggsave(here("Presentation Grades by Marker.png"), plot = last_plot(), device = NULL, path = NULL,
       width = 8, height = 4, units = c("in", "cm", "mm"),
       dpi = 600)

Overall, grading appears to be fairly even, although it is probably worth noting that the grades Madelyn gave are about 10% higher than average.
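
To put a rough number on that, we can compare each marker's average against the overall mean (a sketch, assuming the dplyr functions used elsewhere in this report are available; MeanGiven and GapVsOverall are just illustrative column names):

library(dplyr)

# Average grade each marker gave, and how far that sits from the overall mean
PresTotals %>%
  group_by(Marker) %>%
  summarise(MeanGiven = mean(Total)) %>%
  mutate(GapVsOverall = MeanGiven - mean(PresTotals$Total))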

Grades by Own vs Other Students

PresTotals %<>%
  mutate(OwnStudent = ifelse(Teacher == Marker, "Own Student", "Other Student"))
  
PresSummaryOwnStudent <- summarySE(PresTotals, measurevar = "Total", groupvars = c("OwnStudent", "Marker"))


# PresSummaryOwnStudent contains means and standard errors by marker and own/other student
ggplot(data = PresSummaryOwnStudent, aes(x = Marker, y = Total, fill = OwnStudent)) +
  geom_col(position = pd, width = 0.8) +
  geom_errorbar(aes(ymin = Total - se, ymax = Total + se), width = 0.2, size = 1, position = pd) +
  labs(x = "Marker", y = "Average Grade (Percentage)") +
  theme_alan() +
  ggtitle("Bar Plot of Grades (By Student) on AS Global Perspectives Presentation")

ggsave(here("Presentation Grades by Own Student.png"), plot = last_plot(), device = NULL, path = NULL,
       width = 8, height = 4, units = c("in", "cm", "mm"),
       dpi = 600)

It's again hard to know if there is anything worth remarking on here. Broadly, people were pretty even in their grading of their own students versus other teachers' students. We can see the same overall pattern where Madelyn's grades are a bit higher, and the grades of her own students higher still. This might reflect some self-bias, but it may also just reflect the luck of the draw in which of her own students she happened to grade.
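
One way to quantify the own-versus-other gap per marker is to spread the summary out and take the difference (a sketch; it assumes dplyr and tidyr are available, and OwnMinusOther is just an illustrative name):

library(dplyr)
library(tidyr)

# Difference between the average grade each marker gave their own students
# and the average they gave other teachers' students
PresTotals %>%
  group_by(Marker, OwnStudent) %>%
  summarise(Mean = mean(Total), .groups = "drop") %>%
  pivot_wider(names_from = OwnStudent, values_from = Mean) %>%
  mutate(OwnMinusOther = `Own Student` - `Other Student`)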

Histograms of Grades Given by Teacher and Criteria

PresGrades$GradeF <- factor(PresGrades$Grade)
PresGrades2 <- subset(PresGrades, Criteria != "Total")

Plot1 <- 
  ggplot(data = PresGrades2, aes(x = GradeF, fill = Teacher)) +
  geom_bar(position = pd, width = 0.8) +
  labs(x = "Score", y = "Count") +
  theme_alan() +
  facet_wrap(Teacher ~ Criteria, ncol = 5) +
  ggtitle("AS Global Perspectives - Histograms of Grades by Teacher and Criteria")

ggsave(here("Presentation Histograms by Teacher and Criteria.png"), plot = last_plot(), device = NULL, path = NULL,
       width = 16, height = 10, units = c("in", "cm", "mm"),
       dpi = 600)

[Figure: Histograms of Grades by Teacher and Criteria]

Again, we probably can't see much that is surprising here: Madelyn's students did best because they were more likely to score 4s and 5s.
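
The "more 4s and 5s" claim is easy to check directly (a sketch, assuming Grade in PresGrades2 is the numeric criterion score):

library(dplyr)

# Share of criterion-level scores that were a 4 or a 5, by teacher
PresGrades2 %>%
  group_by(Teacher) %>%
  summarise(PropFourOrFive = mean(Grade >= 4))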

From my own perspective, I can see that my students did very poorly on the conclusion criterion: they almost all scored a 1 or a 2, and I can recognize that I was not insistent enough in making clear that they needed to provide evidence for their solutions. Instead I probably focused too much on teaching them to have a well-reasoned solution, which ended up being beyond most of their capabilities.

Histograms of Grades Given by Marker and Criteria

Plot2 <- 
  ggplot(data = PresGrades2, aes(x = GradeF, fill = Marker)) +
  geom_bar(position = pd, width = 0.8) +
  labs(x = "Score", y = "Count") +
  theme_alan() +
  facet_wrap(Marker ~ Criteria, ncol = 5) +
  ggtitle("AS Global Perspectives - Histograms of Grades by Marker and Criteria")

ggsave(here("Presentation Histograms by Marker and Criteria.png"), plot = Plot2, device = NULL, path = NULL,
       width = 16, height = 10, units = c("in", "cm", "mm"),
       dpi = 600)

[Figure: Histograms of Grades by Marker and Criteria]

We can also look at which grades each of us gave out most commonly. Again, I'm not certain what we should take from this, as we each only worked with relatively small samples of papers, but perhaps some of you might spot things about your own practice here. There is again some clumpiness for some of us. Some of us, myself included, gave out a lot of 2s on conclusions and proposed solutions, whereas Suzana and Madelyn (and to a lesser extent Devin) seem to have been noticeably more lenient in that area, with a mean somewhere around 3.

Similarly, Madelyn clumped on Differentiation, where she gave mostly 4s and a few 3s (and no grades lower than a 3).
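
Impressions like "a mean somewhere around 3" can be checked by averaging each marker's scores per criterion (again a sketch, assuming dplyr and tidyr are available):

library(dplyr)
library(tidyr)

# Average score each marker gave on each criterion
PresGrades2 %>%
  group_by(Marker, Criteria) %>%
  summarise(MeanGrade = mean(Grade), .groups = "drop") %>%
  pivot_wider(names_from = Criteria, values_from = MeanGrade)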

I cannot tell anyone what to make of these numbers, other than that perhaps we can all reflect on them to some limited extent.

Grade Boundary Adjustments

There are many ways we could assign grade boundaries, depending on how generous we are feeling. The simplest would be to use the same grade boundaries the school usually uses: A* is 90, A is 80, B is 70, and so on. If we do that, let's take a look at where the grades would fall.

AStars1 <- subset(PresTotals, Total > 89.99)
AStars1$Boundary1 <- "A*"

As1 <- subset(PresTotals, Total < 89.99 & Total > 79.99)
As1$Boundary1 <- "A"

Bs1 <- subset(PresTotals, Total < 79.99 & Total > 69.99)
Bs1$Boundary1 <- "B"

Cs1 <- subset(PresTotals, Total < 69.99 & Total > 59.99)
Cs1$Boundary1 <- "C"

Ds1 <- subset(PresTotals, Total < 59.99 & Total > 49.99)
Ds1$Boundary1 <- "D"

Es1 <- subset(PresTotals, Total < 49.99 & Total > 39.99)
Es1$Boundary1 <- "E"

Us1 <- subset(PresTotals, Total < 39.99)
Us1$Boundary1 <- "U"

PresTotals2 <- rbind.data.frame(AStars1, As1, Bs1, Cs1, Ds1, Es1, Us1)
PresTotals2$Boundary1 <- factor(PresTotals2$Boundary1, levels = c("U", "E", "D", "C", "B", "A", "A*"))
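
As an aside, the same banding can be done in one step with cut(); this is just an alternative sketch (PresTotalsBanded is a throwaway name, and the result is not used below):

# Compact alternative: band the totals in one step, with 90+ counting as A*, 80-89.99 as A, and so on
PresTotalsBanded <- PresTotals
PresTotalsBanded$Boundary1 <- cut(PresTotalsBanded$Total,
                                  breaks = c(-Inf, 40, 50, 60, 70, 80, 90, Inf),
                                  labels = c("U", "E", "D", "C", "B", "A", "A*"),
                                  right = FALSE)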

ggplot(data = PresTotals2, aes(x = Boundary1)) +
  geom_bar(position = pd, width = 0.8) +
  labs(x = "Grade", y = "Count") +
  theme_alan() +
  ggtitle("AS Global Perspectives - Grade Boundaries - Presentations")

ggsave(here("AS Global Perspectives- Histogram of Grade Boundaries - Presentations.png"), plot = last_plot(), device = NULL, path = NULL,
       width = 8, height = 6, units = c("in", "cm", "mm"),
       dpi = 600)



ggplot(data = PresTotals2, aes(x = Boundary1, fill = Teacher)) +
  geom_bar(position = pd, width = 0.8) +
  labs(x = "Grade", y = "Count") +
  theme_alan() +
  facet_wrap(~ Teacher, ncol = 1) +
  ggtitle("AS Global Perspectives - Grade Boundaries by Teacher")

ggsave(here("AS Global Perspectives- Histogram of Grade Boundaries by Teacher.png"), plot = last_plot(), device = NULL, path = NULL,
       width = 8, height = 6, units = c("in", "cm", "mm"),
       dpi = 600)

Here is where things start to get really ugly. Over half of the total "E" presentation grades are in my class, where there is only a single B and a single A (and no A*s). The only U scores are in Brindley's and Suzana's classes, and Devin's class also has no A* students.

So let's compare this to last year.

#2019
GP2019 <- read.csv(here("AGPGrades2019.csv"))

# Keep only the columns we need and drop rows with a presentation mark of 0
GP2019 %<>%
  subset(select = c(DC...., Pres, PresPerc)) %>%
  subset(Pres != 0)


AStars1 <- subset(GP2019, PresPerc > 89.99)
AStars1$Boundary1 <- "A*"

As1 <- subset(GP2019, PresPerc < 89.99 & PresPerc > 79.99)
As1$Boundary1 <- "A"

Bs1 <- subset(GP2019, PresPerc < 79.99 & PresPerc > 69.99)
Bs1$Boundary1 <- "B"

Cs1 <- subset(GP2019, PresPerc < 69.99 & PresPerc > 59.99)
Cs1$Boundary1 <- "C"

Ds1 <- subset(GP2019, PresPerc < 59.99 & PresPerc > 49.99)
Ds1$Boundary1 <- "D"

Es1 <- subset(GP2019, PresPerc < 49.99 & PresPerc > 39.99)
Es1$Boundary1 <- "E"

Us1 <- subset(GP2019, PresPerc < 39.99)
Us1$Boundary1 <- "U"

`2019Totals` <- rbind.data.frame(AStars1, As1, Bs1, Cs1, Ds1, Es1, Us1)
`2019Totals`$Boundary1 <- factor(`2019Totals`$Boundary1, levels = c("U", "E", "D", "C", "B", "A", "A*"))

ggplot(data = `2019Totals`, aes(x = Boundary1)) +
  geom_bar(position = pd, width = 0.8) +
  labs(x = "Grade", y = "Count") +
  theme_alan() +
  ggtitle("AS Global Perspectives - Grade Boundaries - Presentations (2019)")

Conclusions

Overall, performance on the presentations was roughly in line with expectations, with an average of around 63%. The classes were broadly similar, with my own class coming out lowest, and grading was fairly even across markers, although Madelyn's marks ran somewhat higher than the rest. Under the school's usual grade boundaries the distribution looks fairly ugly, particularly for my class, so the boundaries we apply (and how this year compares to last) are worth discussing as a team.

If you have any questions or comments about student performance in the class, please don’t hesitate to get in touch via email to Alan Nielsen or June Zhu.

This report was generated using R Markdown.