Basics of AS Global Perspectives Midterm

To set the scene, here are the average grades by teacher:

## # A tibble: 5 x 2
##   Teacher  `Average Grade`
##   <chr>              <dbl>
## 1 Alan                60.2
## 2 Brindley            71.8
## 3 Devin               62.2
## 4 Madelyn             68.8
## 5 Suzana              62.3

Overall Performance

Generally, the first thing we want to look at in one of these analyses is the overall distribution of grades, which we can see in the histogram below.
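
One housekeeping note before the code: the chunks below lean on setup from earlier chunks in the document.

# Assumed setup (defined in earlier chunks, not shown here): ggplot2,
# magrittr, Rmisc, dplyr, here, and kableExtra are loaded, theme_alan()
# is a custom ggplot theme, and pd is a position_dodge object,
# e.g. pd <- position_dodge(0.9).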

# Histogram of overall grades
ggplot(data = MidtermTotals, aes(x = Grade)) +
  geom_histogram(aes(y = after_stat(density)), alpha = 0.6, position = "identity") +
  labs(x = "Grade (Percentage)", y = "Density") +
  stat_function(fun = dnorm,
                args = list(mean = mean(MidtermTotals$Grade), sd = sd(MidtermTotals$Grade)),
                color = "black", size = 1.4) +
  scale_x_continuous(limits = c(0, 100)) +
  theme_alan() +
  ggtitle("Histogram of Grades in AS Global Perspectives Midterm")

ggsave(here("Midterm 1 Histogram.png"), plot = last_plot(), device = NULL, path = NULL,
       width = 8, height = 4, units = c("in", "cm", "mm"),
       dpi = 600)

Overall performance was around what we expected, with an average score of 65.43% and a standard deviation of 15.8 percentage points (range = 16.67% to 90%).
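
For anyone re-running this, those figures fall straight out of the totals (a quick sketch, assuming the MidtermTotals frame used above):

# Summary statistics quoted in the text above
mean(MidtermTotals$Grade)    # ~65.43
sd(MidtermTotals$Grade)      # ~15.8
range(MidtermTotals$Grade)   # ~16.67 to 90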

Grades by Teacher

Let's see how the grades fell on a by-teacher basis.

# summarySE() (from the Rmisc package) returns group means plus sd, se, and ci
MidtermSummaryTeacher <- summarySE(MidtermGrades, measurevar = "Grade",
                                   groupvars = c("Teacher", "Question"))

MidtermSummaryTeacher %<>%
  subset(Teacher != 0 & Teacher != "")

# the summary table already holds the means, so plot them directly with geom_col()
ggplot(data = MidtermSummaryTeacher, aes(x = Question, y = Grade, fill = Teacher)) +
  geom_col(position = pd, width = 0.8) +
  geom_errorbar(aes(ymin = Grade - se, ymax = Grade + se),
                width = 0.2, size = 1, position = pd) +
  labs(x = "Question", y = "Average Grade (Value)") +
  theme_alan() +
  ggtitle("Bar Plot of Grades (By Teacher) on AS Global Perspectives Midterm")

ggsave(here("MidtermGrades by Teacher.png"), plot = last_plot(), device = NULL, path = NULL,
       width = 8, height = 4, units = c("in", "cm", "mm"),
       dpi = 600)

There isn’t anything too surprising in the by-teacher average grade per question. All classes performed pretty similarly on questions 1A and 1B, which is as expected because those should be the “gimme” questions on a Cambridge exam.

The difference between classes emerges on the harder questions 2 and 3, where there is a fairly clear difference between Brindley’s and Madelyn’s classes and the others. At least in Brindley’s case this is to be expected, as these are his IGCSE students and probably the most prepared for the course overall. I leave other interpretation up to the rest of you.

Grades by Marker

MidtermSummaryMarker <- summarySE(MidtermGrades, measurevar = "Grade",
                                  groupvars = c("Marker", "Question"))

MidtermSummaryMarker %<>%
  subset(Marker != 0 & Marker != "")

ggplot(data = MidtermSummaryMarker, aes(x = Question, y = Grade, fill = Marker)) +
  geom_col(position = pd, width = 0.8) +
  geom_errorbar(aes(ymin = Grade - se, ymax = Grade + se),
                width = 0.2, size = 1, position = pd) +
  labs(x = "Question", y = "Average Grade (Value)") +
  theme_alan() +
  ggtitle("Bar Plot of Grades (By Marker) on AS Global Perspectives Midterm")

ggsave(here("MidtermGrades by Marker.png"), plot = last_plot(), device = NULL, path = NULL,
       width = 8, height = 4, units = c("in", "cm", "mm"),
       dpi = 600)

What’s probably most relevant is whether any of us was marking significantly more leniently than the others, and this doesn’t appear to be the case overall for any question - the average grade given by each teacher is pretty similar no matter which question we are looking at.
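
If we wanted a formal check rather than eyeballing, a minimal sketch would be a one-way ANOVA of grade on marker within each question (assuming the MidtermGrades frame used above, with blank markers dropped):

# Does the marker predict the grade awarded, within each question?
for (q in unique(as.character(MidtermGrades$Question))) {
  sub <- subset(MidtermGrades, Question == q & Marker != "" & Marker != 0)
  cat("Question", q, "\n")
  print(summary(aov(Grade ~ Marker, data = sub)))
}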

This might suggest that we don’t have much to worry about when it comes to further adjudication and grade boundaries, but I still suggest we do at least a few examples together (if for no other reason than to have things to show to the students).

Grades by Own vs Other Students

MidtermGrades %<>%
  subset(Marker != 0 & Marker != "" & Teacher != 0 & Teacher != "") %>%
  mutate(OwnStudent = ifelse(Teacher == Marker, "Own Student", "Other Student"))

MidtermSummaryOwnStudent <- summarySE(MidtermGrades, measurevar = "Grade",
                                      groupvars = c("OwnStudent", "Teacher", "Question"))


ggplot(data = MidtermSummaryOwnStudent, aes(x = Question, y = Grade, fill = OwnStudent)) +
  geom_col(position = pd, width = 0.8) +
  geom_errorbar(aes(ymin = Grade - se, ymax = Grade + se),
                width = 0.2, size = 1, position = pd) +
  labs(x = "Question", y = "Average Grade (Value)") +
  theme_alan() +
  facet_wrap(~Teacher, scales = "free_x") +
  ggtitle("Bar Plot of Grades (By Student) on AS Global Perspectives Midterm")

ggsave(here("MidtermGrades by Own Student.png"), plot = last_plot(), device = NULL, path = NULL,
       width = 8, height = 4, units = c("in", "cm", "mm"),
       dpi = 600)

The final basic thing we should check is whether teachers were unfair when grading their own students. It’s possible, for example, that we might grade our own students more favorably. But we can see from the graphs above that this doesn’t appear to be the case - for basically all of the teachers, the grades given to own/other students are identical. The only exception is Suzana, who might have been a little bit harder on her own students (but more likely this is just a sampling issue - we each only received a handful of our own students to grade and might have been given a bad handful).
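
Again, a sketch of a more formal check than eyeballing - a Welch t-test per teacher, assuming each teacher's students were marked both by their own teacher and by others:

# Compare grades received from own vs other markers, per teacher
for (t in unique(MidtermGrades$Teacher)) {
  sub <- subset(MidtermGrades, Teacher == t)
  print(t.test(Grade ~ OwnStudent, data = sub))
}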

Histograms of Grades Given by Teacher and Question

MidtermGrades$GradeF <- factor(MidtermGrades$Grade)

LimitedGrades <- subset(MidtermGrades, Question == "2" | Question == "3")

Plot1 <-
  ggplot(data = LimitedGrades, aes(x = GradeF, fill = Teacher)) +
  geom_bar(position = pd, width = 0.8) +
  labs(x = "Score", y = "Count") +
  theme_alan() +
  facet_wrap(Teacher ~ Question, scales = "free_x", ncol = 2) +
  ggtitle("AS Global Perspectives - Histograms of Grades by Question and Teacher")

ggsave(here("Midterm Histograms by Teacher and Question.png"), plot = Plot1, device = NULL, path = NULL,
       width = 8, height = 10, units = c("in", "cm", "mm"),
       dpi = 600)
[Figure: histograms of grades by question and teacher, saved above]

What can we see from the above? Nothing too surprising. Mostly we can see that Brindley’s students did better overall, because none of them scored lower than a 5/10 on Question 2 or lower than a 6/14 on Question 3, whereas students from other classes achieved a broader range of (worse) scores.

Individually, all we can take from this is perhaps to adjust our approaches. I suspect that some of this difference is indeed the experience level of the students/teachers, but it’s probably also worth considering our own contributions. I for one see this as clear evidence that I need to teach students more directly to the mark scheme to ensure that they aren’t leaving so many marks on the table (I expect this difference to be even more pronounced in presentations).

Histograms of Grades Given by Marker and Question

Plot2 <-
  ggplot(data = LimitedGrades, aes(x = GradeF, fill = Marker)) +
  geom_bar(position = pd, width = 0.8) +
  labs(x = "Score", y = "Count") +
  theme_alan() +
  facet_wrap(Marker ~ Question, scales = "free_x", ncol = 2) +
  ggtitle("AS Global Perspectives - Histograms of Grades by Question and Marker")

ggsave(here("Midterm Histograms by Marker and Question.png"), plot = Plot2, device = NULL, path = NULL,
       width = 12, height = 10, units = c("in", "cm", "mm"),
       dpi = 600)
[Figure: histograms of grades by question and marker, saved above]

We can also look at which grades we each gave out most commonly. Again, I’m not certain what we should take from this, as we each only worked with relatively small samples of papers, but perhaps some of you might spot things about your own practice here. For example, on Question 2 I gave out many scores of 7 - maybe I wasn’t the best or most confident at differentiating there (and this probably suggests more of those students should have been given a 6). The same goes for Brindley, who gave many 9s on Question 3 - it seems likely some of those 9s should have been 8s instead.

The other thing to note is that we didn’t give a single score of 10 on Question 2, and only Suzana gave a score higher than 12/14 on Question 3. This might suggest we graded fairly accurately relative to the Cambridge mark scheme, or it might suggest our expectations for the best performance are too high. You’ll all have a much better idea about that than I will.

Grade Boundary Adjustments

There are many ways we could assign grade boundaries, depending on how generous we are feeling. The simplest would be to use the same grade boundaries the school usually uses: A* is 90, A is 80, B is 70, and so on and so forth. If we do that, let’s take a look at where the grades would fall.

AStars1 <- subset(MidtermTotals, Grade > 89.99)
AStars1$Boundary1 <- "A*"

As1 <- subset(MidtermTotals, Grade < 89.99 & Grade > 79.99)
As1$Boundary1 <- "A"

Bs1 <- subset(MidtermTotals, Grade < 79.99 & Grade > 69.99)
Bs1$Boundary1 <- "B"

Cs1 <- subset(MidtermTotals, Grade < 69.99 & Grade > 59.99)
Cs1$Boundary1 <- "C"

Ds1 <- subset(MidtermTotals, Grade < 59.99 & Grade > 49.99)
Ds1$Boundary1 <- "D"

Es1 <- subset(MidtermTotals, Grade < 49.99 & Grade > 39.99)
Es1$Boundary1 <- "E"

Us1 <- subset(MidtermTotals, Grade < 39.99)
Us1$Boundary1 <- "U"

MidtermTotals2 <- rbind.data.frame(AStars1, As1, Bs1, Cs1, Ds1, Es1, Us1)
MidtermTotals2$Boundary1 <- factor(MidtermTotals2$Boundary1, levels = c("U", "E", "D", "C", "B", "A", "A*"))
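
(As an aside, the repeated subsets above can be collapsed into a single cut() call; this sketch should be equivalent for these data:)

# One-step alternative: bin percentage grades into ordered letter grades
MidtermTotals2$Boundary1 <- cut(MidtermTotals2$Grade,
                                breaks = c(-Inf, 40, 50, 60, 70, 80, 90, Inf),
                                labels = c("U", "E", "D", "C", "B", "A", "A*"),
                                right = FALSE, ordered_result = TRUE)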

ggplot(data = MidtermTotals2, aes(x = Boundary1)) +
  geom_bar(width = 0.8) +
  labs(x = "Letter Grade", y = "Count") +
  theme_alan() +
  ggtitle("AS Global Perspectives - Grade Boundaries")

ggsave(here("AS Global Perspectives- Histogram of Grade Boundaries.png"), plot = last_plot(), device = NULL, path = NULL,
       width = 8, height = 6, units = c("in", "cm", "mm"),
       dpi = 600)

So that’s actually a pretty normal distribution of letter grades centered on C/B - approximately where you normally want a grade distribution to be, though maybe with too many U and E scores.
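
(The exact counts behind the plot, for reference:)

table(MidtermTotals2$Boundary1)

Let’s compare it to the distribution from last year.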

#2019
GP2019 <- read.csv(here("AGPGrades2019.csv"))
GP2019 <- subset(GP2019, MTLetter != "")
GP2019$MTLetter <- factor(GP2019$MTLetter, levels = c("U", "E", "D", "C", "B", "A"))

ggplot(data = GP2019, aes(x = MTLetter)) +
  geom_bar(width = 0.8) +
  labs(x = "Letter Grade", y = "Count") +
  theme_alan() +
  ggtitle("AS Global Perspectives - Grade Boundaries (2019)")

ggsave(here("AS Global Perspectives- Histogram of Grade Boundaries (2019).png"), plot = last_plot(), device = NULL, path = NULL,
       width = 8, height = 6, units = c("in", "cm", "mm"),
       dpi = 600)

So last year’s distribution is actually pretty similar - just with fewer U and E grades. To see why, we should take a look at the distribution of raw scores and how they mapped onto letter grades, both for last year and for our current unadjusted grades this year.

#2020

GP2019Cut <-
  GP2019 %>%
    mutate(Year = 2019) %>%
    subset(select = c(`DC....`, Year, MTScore, MTLetter)) %>%
    setNames(c("Student", "Year", "Score", "Letter")) 
  
GP2020Cut <-
  MidtermTotals2 %>%
    mutate(Year = 2020) %>%
    subset(select = c(Student, Year, sum, Boundary1)) %>%
    setNames(c("Student", "Year", "Score", "Letter"))

GPTotals <- rbind.data.frame(GP2019Cut, GP2020Cut)

ggplot(data = GPTotals, aes(x = Score, fill = Letter)) +
  geom_bar(position = pd, width = 0.8) +
  labs(x = "Score", y = "Count") +
  facet_wrap(~ Year, ncol = 1) +
  theme_alan() +
  ggtitle("AS Global Perspectives - Distribution of Raw Scores and Letter Grades")

So we can see that we were way easier on the marking this year than last year - although last year we did not award any A* grades. Basically, this year we graded such that we ended up with a distribution that looks roughly like last year’s, so really all we need to do is cut out some of the U and E grades. Let’s see how well simply applying a “standard” curve to the grades works, by forcing them into a normal distribution.

library(ProfessR)
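# do.grades() from ProfessR assigns letter grades by splitting scores at
# the supplied division points; here divs is the min and max plus the
# midpoints between the boxplot's five-number-summary statistics,
# i.e. a rough quartile-based curve.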
B = boxplot(MidtermTotals2$sum, plot=FALSE)
divs = c(min(MidtermTotals2$sum), B$stats[1:4] + diff(B$stats)/2, max(MidtermTotals2$sum) )
D1 = do.grades(MidtermTotals2$sum, divs=divs, tit="AS GP Midterm")

## Grade divisions:
## 5
## 14
## 18.5
## 21.5
## 25
## 27
## Letter Grade Distribution:
## 1 A+ 8
## 2 A 4
## 3 A- 3
## 4 B+ 8
## 5 B 8
## 6 B- 7
## 7 C+ 9
## 8 C 9
## 9 C- 10
## 10 D+ 13
## 11 D 7
## 12 D- 11
## 13 F 8
## [1] "Mean Score= 74.7401360544218"

Sadly this doesn’t actually work very well, because our school uses grade boundaries that are a little strange relative to a standard curve, and I’m not good enough at this to write a package that handles them. So instead I’m going to log-transform the data to make it (more) normally distributed, then assign grades at the quartiles.

MidtermTotals2$LogGrade <- log10(MidtermTotals2$Grade)

ggplot(data = MidtermTotals2, aes(x = LogGrade)) +
  geom_histogram(aes(y = after_stat(density)), alpha = 0.6, position = "identity") +
  labs(x = "Grade (Log)", y = "Density") +
  stat_function(fun = dnorm,
                args = list(mean = mean(MidtermTotals2$LogGrade), sd = sd(MidtermTotals2$LogGrade)),
                color = "black", size = 1.4) +
  theme_alan() +
  ggtitle("Normalised Histogram of Grades in AS Global Perspectives")

So there we are - the data is transformed to something a bit more normal, and we have picked some grade divisions. What we still need, however, is to assign the grade boundaries.

This turns out to be very hard, because in addition to the school having weird grade boundaries, our data just plain isn’t normally distributed, no matter how we look at it. As such, I think we’re better off sticking with the school grading scheme - all we really need to do is bump up our lowest grades so they aren’t all Us and Es.
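
For the skeptical, a quick sketch of a formal check (Shapiro-Wilk tests on the raw and log-transformed grades; if the claim above holds, both should reject normality):

# Normality checks on raw and log-transformed grades
shapiro.test(MidtermTotals2$Grade)
shapiro.test(MidtermTotals2$LogGrade)

Let’s take a look at our grade counts.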

# count() here is plyr::count(), which tabulates a bare vector
plyr::count(GP2020Cut$Score) %>%
    knitr::kable(caption = "AS GP Grade Counts", format = "html", row.names = F) %>%
    kable_styling(full_width = F)
AS GP Grade Counts

Score   Count
5       3
11      1
12      1
13      3
14      5
15      6
16      7
17      6
18      7
19      10
20      9
21      9
22      7
23      8
24      8
25      3
26      4
27      8

We have a total of 3 students who scored a 5/30 on the exam. I think it’s safe to say those students can stay at a U Grade.

The next 5 students scored 11, 12, 13, 13, and 13 - which looks like the right number of students to give an E grade to (we gave out 6 Es with a slightly larger class in 2019).

We can then assign our A*s pretty easily - all students with the highest score of 27 can have the A*. There are 8 of them (so our number of A* students matches our combined number of E and U students).

I think we can also keep the current A boundary of 24/30, giving us 15 A students.

We probably want fewer Ds than As, so let’s go with scores of 14-15 being Ds, which gives us 11 students.

That leaves us with 63 students between B and C. We want more Cs than Bs to be in line with 2019 (especially because we have more A/A* students), so let’s put the boundary so that scores of 16-20 earn a C, which gives 39 students a C (and the remaining 24 a B).

So we end up with these grade divisions (number of students in brackets):

A*: >26 (8)
A: 24 - 26 (15)
B: 21 - 23 (24)
C: 16 - 20 (39)
D: 14 - 15 (11)
E: 11 - 13 (5)
U: <10 (3)
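
As a sanity check, those headcounts can be reproduced in one line (a sketch, with break points placed halfway between the raw-score bands):

# Verify the number of students in each proposed band
table(cut(MidtermTotals2$sum,
          breaks = c(-Inf, 10.5, 13.5, 15.5, 20.5, 23.5, 26.5, Inf),
          labels = c("U", "E", "D", "C", "B", "A", "A*")))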

Now we just need to assign the grades accordingly, which I do below. Within each letter band the percentage is interpolated linearly from the band’s floor (e.g. an A spans 80-87%, so each raw mark above 24 is worth 3.5 percentage points).

AStars1 <- subset(MidtermTotals2, sum > 26)
AStars1$Boundary2 <- "A*"
AStars1$Percentage <- 90

As1 <- subset(MidtermTotals2, sum < 27 & sum > 23)
As1$Boundary2 <- "A"
As1$Percentage <- (As1$sum - 24) * 3.5 + 80

Bs1 <- subset(MidtermTotals2, sum < 24 & sum > 20)
Bs1$Boundary2 <- "B"
Bs1$Percentage <- (Bs1$sum - 21) * 3.5 + 70

Cs1 <- subset(MidtermTotals2, sum < 21 & sum > 15)
Cs1$Boundary2 <- "C"
Cs1$Percentage <- (Cs1$sum - 16) * 2 + 60

Ds1 <- subset(MidtermTotals2, sum < 16 & sum > 13)
Ds1$Boundary2 <- "D"
Ds1$Percentage <- (Ds1$sum - 14) * 5 + 50

Es1 <- subset(MidtermTotals2, sum < 14 & sum > 10)
Es1$Boundary2 <- "E"
Es1$Percentage <- (Es1$sum - 11) * 3.5 + 40

Us1 <- subset(MidtermTotals2, sum < 10)
Us1$Boundary2 <- "U"
Us1$Percentage <- 30

MidtermTotals3 <- 
  rbind.data.frame(AStars1, As1, Bs1, Cs1, Ds1, Es1, Us1) %>%
  arrange(Student)

FinalGrades <-
  MidtermTotals2 %>%
    arrange(Student) %>%
    mutate(`Percentage (r)` = MidtermTotals3$Percentage) %>%
    mutate(Boundary2 = MidtermTotals3$Boundary2) %>%
    subset(select = c(Student, Teacher, sum, `Percentage (r)`, Boundary2)) %>%
    setNames(c("Student", "Teacher", "Score", "Percentage (r)", "Letter Grade")) %>%
    mutate(`Letter Grade` = factor(`Letter Grade`, levels = c("U", "E", "D", "C", "B", "A", "A*")))  %>%
    arrange(-Score)
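
(Aligning the two frames by sorting on Student and copying columns across works here, but a keyed join is less fragile; a sketch, assuming Student uniquely identifies rows - FinalGrades2 is just an illustrative name:)

# Equivalent merge via an explicit join on Student
FinalGrades2 <- MidtermTotals2 %>%
  dplyr::left_join(subset(MidtermTotals3, select = c(Student, Percentage, Boundary2)),
                   by = "Student")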

head(FinalGrades, 10)
## # A tibble: 10 x 5
##    Student           Teacher  Score `Percentage (r)` `Letter Grade`
##    <chr>             <chr>    <int>            <dbl> <fct>         
##  1 Alice Chu         Devin       27               90 A*            
##  2 Chelsea Wang      Suzana      27               90 A*            
##  3 Erica Zhao        Suzana      27               90 A*            
##  4 Grace RenJingJing Madelyn     27               90 A*            
##  5 Jeremy Yin        Brindley    27               90 A*            
##  6 Jessica Guo       Madelyn     27               90 A*            
##  7 Nina Wang         Suzana      27               90 A*            
##  8 Paulin Zhang      Madelyn     27               90 A*            
##  9 Amy An            Suzana      26               87 A             
## 10 Frank  Wu         Alan        26               87 A

So now let’s look at the distribution of the grades.

ggplot(data = FinalGrades, aes(x = Score, fill = `Letter Grade`)) +
  geom_bar(position = pd, width = 0.8) +
  labs(x = "Score", y = "Count") +
  theme_alan() +
  ggtitle("AS Global Perspectives - Revised Distribution of Letter Grades")

Woohoo - that looks pretty good. Let’s output this as some spreadsheets.

write.csv(FinalGrades, here("AS GP Revised Grades.csv"))

#Alan
write.csv(subset(FinalGrades, Teacher == "Alan"), here("AS GP Revised Grades- Alan.csv"))

#Brindley
write.csv(subset(FinalGrades, Teacher == "Brindley"), here("AS GP Revised Grades- Brindley.csv"))

#Devin
write.csv(subset(FinalGrades, Teacher == "Devin"), here("AS GP Revised Grades- Devin.csv"))

#Madelyn
write.csv(subset(FinalGrades, Teacher == "Madelyn"), here("AS GP Revised Grades- Madelyn.csv"))

#Suzana
write.csv(subset(FinalGrades, Teacher == "Suzana"), here("AS GP Revised Grades- Suzana.csv"))

Conclusions

If you have any questions or comments about student performance in the class, please don’t hesitate to get in touch via email to Alan Nielsen.

This report was generated using R Markdown.