@techreport{4e88ff0e992a4d629d77ae63fb706884,
  title         = {Personalized Stopping Rules in {Bayesian} Adaptive Mastery Assessment},
  abstract      = {We propose a new model to assess the mastery level of a given skill efficiently. The model, called Bayesian Adaptive Mastery Assessment (BAMA), uses information on the accuracy and the response time of the answers given and infers the mastery at every step of the assessment. BAMA balances the length of the assessment and the certainty of the mastery inference by employing a Bayesian decision-theoretic framework adapted to each student. All these properties contribute to a novel approach in assessment models for intelligent learning systems. The purpose of this research is to explore the properties of BAMA and evaluate its performance concerning the number of questions administered and the accuracy of the final mastery estimates across different students. We simulate student performances and establish that the model converges with low variance and high efficiency leading to shorter assessment duration for all students. Considering the experimental results, we expect our approach to avoid the issue of over-practicing and under-practicing and facilitate the development of Learning Analytics tools to support the tutors in the evaluation of learning effects and instructional decision making.},
  keywords      = {adaptive assessment, performance model, mastery criteria, optimal stopping policy},
  author        = {Sapountzi, Anni and Bhulai, Sandjai and Cornelisz, Ilja and van Klaveren, Chris},
  year          = {2021},
  month         = mar,
  day           = {5},
  language      = {English},
  series        = {arXiv.org},
  number        = {2103.03766},
  eprint        = {2103.03766},
  eprinttype    = {arXiv},
  publisher     = {Cornell University - arXiv},
  address       = {United States},
  type          = {Working Paper},
  institution   = {Cornell University - arXiv},
}