# test.py
# python standard library
from os import path, listdir
import fnmatch
import random
from datetime import datetime
import json
import logging
import asyncio
# this project
import questionfactory as questions
# Logger configuration
logger = logging.getLogger(__name__)
# ===========================================================================
class TestFactoryException(Exception):
    pass
# ===========================================================================
# Each instance of TestFactory() is a test generator.
# For example, if we want to serve two different tests, then we need two
# instances of TestFactory(), one for each test.
# ===========================================================================
class TestFactory(dict):
    # -----------------------------------------------------------------------
    # Loads the configuration from a YAML file, then updates (overrides)
    # some settings using the conf argument.
    # Base questions are loaded from files into a pool.
    # -----------------------------------------------------------------------
    def __init__(self, conf):
        super().__init__(conf)
        # set defaults and sanity checks
        self.sanity_checks()
        if self.get('review'):
            logger.info('Review mode. No questions loaded.')
            return
        # loads yaml files to question_factory
        self.question_factory = questions.QuestionFactory()
        self.question_factory.load_files(files=self['files'], questions_dir=self['questions_dir'])
        # check if all questions exist (are the 'ref' keys correct?)
        errors_found = False
        for q in self['questions']:
            for r in q['ref']:
                logger.info(f'Checking question "{r}".')
                try:
                    self.question_factory.generate(r)
                except Exception:
                    logger.critical(f'Can\'t generate question "{r}".')
                    errors_found = True
        if errors_found:
            logger.critical('Errors found while generating questions.')
            raise TestFactoryException()
        logger.info(f'Test factory ready for "{self["ref"]}".')
    # -----------------------------------------------------------------------
    # Checks for valid keys and sets default values.
    # Also checks that required files and directories exist.
    # (An example configuration sketch follows this method.)
    # -----------------------------------------------------------------------
    def sanity_checks(self):
        # --- database
        if 'database' not in self:
            logger.critical('Missing "database" in configuration.')
            raise TestFactoryException()
        elif not path.isfile(path.expanduser(self['database'])):
            logger.critical(f'Can\'t find database {self["database"]}.')
            raise TestFactoryException()
        # --- answers_dir
        if 'answers_dir' not in self:
            logger.critical('Missing "answers_dir".')
            raise TestFactoryException()
        try:  # check if answers_dir is a writable directory
            f = open(path.join(path.expanduser(self['answers_dir']), 'REMOVE-ME'), 'w')
        except OSError:
            logger.critical(f'Cannot write answers to "{self["answers_dir"]}".')
            raise TestFactoryException()
        else:
            with f:
                f.write('You can safely remove this file.')
        # --- ref
        if 'ref' not in self:
            logger.warning('Missing "ref". Will use filename.')
            self['ref'] = self['filename']
        # --- questions_dir
        if 'questions_dir' not in self:
            logger.warning(f'Missing "questions_dir". Using {path.abspath(path.curdir)}')
            self['questions_dir'] = path.curdir
        elif not path.isdir(path.expanduser(self['questions_dir'])):
            logger.critical(f'Can\'t find questions directory "{self["questions_dir"]}"')
            raise TestFactoryException()
        # --- files
        if 'files' not in self:
            logger.warning('Missing "files" key. Loading all YAML files from "questions_dir"... DANGEROUS!!!')
            try:
                self['files'] = fnmatch.filter(listdir(self['questions_dir']), '*.yaml')
            except OSError:
                logger.critical('Couldn\'t get list of YAML question files.')
                raise TestFactoryException()
        if isinstance(self['files'], str):
            self['files'] = [self['files']]
        # --- questions
        if 'questions' not in self:
            logger.critical(f'Missing "questions" in {self["filename"]}.')
            raise TestFactoryException()
        # normalize questions to a list of dictionaries
        for i, q in enumerate(self['questions']):
            # normalize question to a dict and ref to a list of references
            if isinstance(q, str):
                q = {'ref': [q]}
            elif isinstance(q, dict) and isinstance(q['ref'], str):
                q['ref'] = [q['ref']]
            self['questions'][i] = q
        # --- defaults for optional keys
        self.setdefault('title', '')
        # self.setdefault('show_hints', False)  # FIXME not implemented yet
        self.setdefault('show_points', False)
        self.setdefault('scale_points', True)
        self.setdefault('scale_max', 20.0)
        self.setdefault('duration', 0)  # FIXME unused
        self.setdefault('debug', False)
        self.setdefault('show_ref', False)
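    # -----------------------------------------------------------------------
    # Example configuration (a hypothetical sketch, not part of the original
    # module): the key names below are the ones sanity_checks() looks for;
    # 'database', 'answers_dir' and 'questions' are mandatory, the rest get
    # defaults. File names and values are made up for illustration.
    #
    #   conf = {
    #       'filename': 'test-demo.yaml',       # used in messages/fallbacks
    #       'ref': 'test-demo',                 # defaults to 'filename'
    #       'database': 'students.db',          # must be an existing file
    #       'answers_dir': 'answers',           # must be a writable directory
    #       'questions_dir': 'questions',       # defaults to the current dir
    #       'files': ['questions.yaml'],        # defaults to all *.yaml files
    #       'questions': ['q1', {'ref': ['q2a', 'q2b'], 'points': 2.0}],
    #       'review': False,                    # True skips loading questions
    #   }
    # -----------------------------------------------------------------------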
    # -----------------------------------------------------------------------
    # Given a dictionary with a student id {'name': 'john', 'number': 123},
    # returns an instance of Test() for that particular student.
    # -----------------------------------------------------------------------
    async def generate(self, student):
        test = []
        total_points = 0.0
        n = 1
        loop = asyncio.get_running_loop()
        for qq in self['questions']:
            # generate Question() selected randomly from list of references
            qref = random.choice(qq['ref'])
            try:
                q = await loop.run_in_executor(None, self.question_factory.generate, qref)
            except Exception:
                logger.error(f'Can\'t generate question "{qref}". Skipping.')
                continue
            # some defaults
            if q['type'] in ('information', 'success', 'warning', 'alert'):
                q['points'] = qq.get('points', 0.0)
            else:
                q['points'] = qq.get('points', 1.0)
            q['number'] = n
            n += 1
            total_points += q['points']
            test.append(q)
        # normalize question points to scale (skip if the test has no points)
        if self['scale_points'] and total_points > 0:
            for q in test:
                q['points'] *= self['scale_max'] / total_points
        return Test({
            'ref': self['ref'],
            'title': self['title'],                # title of the test
            'student': student,                    # student id
            'questions': test,                     # list of questions
            'answers_dir': self['answers_dir'],
            # FIXME which ones are required?
            # 'show_hints': self['show_hints'],
            'show_points': self['show_points'],
            'show_ref': self['show_ref'],
            'debug': self['debug'],                # required by template test.html
            'database': self['database'],
            'questions_dir': self['questions_dir'],
            'files': self['files'],
        })
    # -----------------------------------------------------------------------
    # def __repr__(self):
    #     return '{\n' + '\n'.join(' {0:14s}: {1}'.format(k, v) for k,v in self.items()) + '\n}'
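# ---------------------------------------------------------------------------
# Usage sketch (hypothetical, not part of the original module): one
# TestFactory per test configuration; generate() is awaited once per student.
#
#   factory = TestFactory(conf)          # conf: parsed YAML dict, see above
#   test = await factory.generate({'name': 'john', 'number': 123})
# ---------------------------------------------------------------------------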
# ===========================================================================
# Each instance of the Test() class is a concrete test to be answered by
# a single student. It must/will contain at least these keys:
# start_time, finish_time, questions, grade [0,20]
# Note: for the save_json() function other keys are required
# Note: grades are rounded to 1 decimal point: 0.0 - 20.0
# ===========================================================================
class Test(dict):
    # -----------------------------------------------------------------------
    def __init__(self, d):
        super().__init__(d)
        self['start_time'] = datetime.now()
        self['finish_time'] = None
        self['state'] = 'ONGOING'
        self['comment'] = ''
        logger.info(f'Student {self["student"]["number"]}: starting test.')
    # -----------------------------------------------------------------------
    # Removes all answers from the test (clean slate)
    def reset_answers(self):
        for q in self['questions']:
            q['answer'] = None
        logger.info(f'Student {self["student"]["number"]}: all answers cleared.')
    # -----------------------------------------------------------------------
    # Given a dictionary ans={index: 'some answer'}, updates the answers of
    # the test. Only the questions referred to are affected.
    def update_answers(self, ans):
        for i in ans:
            self['questions'][i]['answer'] = ans[i]
        logger.info(f'Student {self["student"]["number"]}: {len(ans)} answers updated.')
    # -----------------------------------------------------------------------
    # Corrects all the answers and computes the final grade
    async def correct(self):
        self['finish_time'] = datetime.now()
        self['state'] = 'FINISHED'
        grade = 0.0
        for q in self['questions']:
            grade += await q.correct_async() * q['points']
        self['grade'] = max(0, round(grade, 1))  # avoid negative final grades
        logger.info(f'Student {self["student"]["number"]}: correction gave {self["grade"]} points.')
        return self['grade']
    # -----------------------------------------------------------------------
    def giveup(self):
        self['finish_time'] = datetime.now()
        self['state'] = 'QUIT'
        self['grade'] = 0.0
        logger.info(f'Student {self["student"]["number"]}: gave up.')
        return self['grade']
    # -----------------------------------------------------------------------
    def save_json(self, filepath):
        with open(path.expanduser(filepath), 'w') as f:
            json.dump(self, f, indent=2, default=str)  # HACK default=str required for datetime objects
        logger.info(f'Student {self["student"]["number"]}: saved JSON file.')
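# ===========================================================================
# Lifecycle sketch (hypothetical, for illustration only): how a Test produced
# by TestFactory.generate() might be driven by the serving layer. The index
# keys, answer values and file name below are made up.
#
#   test = await factory.generate({'name': 'john', 'number': 123})
#   test.update_answers({0: '42', 3: ['a', 'c']})   # answers keyed by question index
#   grade = await test.correct()                    # grades answers, state -> FINISHED
#   test.save_json(path.join(test['answers_dir'], 'john.json'))
#   # or, if the student quits without submitting:
#   # test.giveup()
# ===========================================================================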