@inproceedings{60bab9f01f8f455f962c0323faa87074,
title = "Automatic conversational assessment using large language model technology",
abstract = "Student evaluation is an important, yet costly, part of instruction. Traditional exams are a burden for teachers and stressful for students. This paper uses large language model (LLM) technology to create a system for Automated Conversational Assessment (ACA), in which a dialog system, guided by course content and intended learning outcomes, interviews the student to determine the level of learning. In a pilot experiment in a university course, we found that the ACA system scores correlate with the grades given by a human and also correlate positively with the results of a conventional exam taken by the same students. In a questionnaire study, the students reported that they perceived the assessment as fair and acceptable.",
author = "Jan Bergerhoff and Johannes Bendler and Stefan Stefanov and Enrico Cavinato and Leonard Esser and Tommy Tran and Aki H{\"a}rm{\"a}",
year = "2025",
month = jan,
day = "21",
doi = "10.1145/3702163.3702169",
language = "English",
isbn = "9798400717819",
series = "Proceedings of the International Conference on Education Technology and Computers",
publisher = "Association for Computing Machinery",
pages = "39--45",
booktitle = "Proceedings of the 2024 16th International Conference on Education Technology and Computers",
address = "United States",
}