test.py
13.3 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
'''
TestFactory - generates tests for students
Test - instances of this class are individual tests
'''
# python standard library
from os import path
import random
from datetime import datetime
import logging
# this project
from perguntations.questions import QFactory, QuestionException
from perguntations.tools import load_yaml
# Logger configuration
logger = logging.getLogger(__name__)
# ============================================================================
class TestFactoryException(Exception):
    '''Raised on configuration or question-generation errors in this module.'''
# ============================================================================
class TestFactory(dict):
    '''
    Each instance of TestFactory() is a test generator.
    For example, if we want to serve two different tests, then we need two
    instances of TestFactory(), one for each test.
    '''

    # ------------------------------------------------------------------------
    def __init__(self, conf):
        '''
        Merges `conf` over built-in defaults, validates the configuration
        and builds one QFactory per question actually used by the test.

        conf: dict with the test configuration (typically loaded from yaml).
        Raises TestFactoryException on invalid configuration, duplicate
        question refs, or questions that fail a trial generation.
        '''
        # --- set test defaults and then use given configuration
        super().__init__({  # defaults
            'title': '',
            'show_points': True,
            'scale': None,      # or [min, max], e.g. [0, 20]
            'duration': 0,      # minutes; 0 = infinite
            'autosubmit': False,
            'debug': False,
            'show_ref': False
        })
        self.update(conf)

        # --- perform sanity checks and normalize the test questions
        self.sanity_checks()
        logger.info('Sanity checks PASSED.')

        # --- find refs of all questions used in the test
        qrefs = {r for qq in self['questions'] for r in qq['ref']}
        logger.info('Declared %d questions (each test uses %d).',
                    len(qrefs), len(self["questions"]))

        # --- for review, we are done. no factories needed
        # NOTE(review): 'review' has no default above — assumed to be set by
        # the caller in `conf`; confirm against callers.
        if self['review']:
            logger.info('Review mode. No questions loaded. No factories.')
            return

        # --- load and build question factories
        self.question_factory = {}
        counter = 1  # sequential number of questions actually used (for logs)
        for file in self["files"]:
            fullpath = path.normpath(path.join(self["questions_dir"], file))
            (dirname, filename) = path.split(fullpath)
            logger.info('Loading "%s"...', fullpath)
            questions = load_yaml(fullpath)  # , default=[])
            for i, question in enumerate(questions):
                # make sure every question in the file is a dictionary
                if not isinstance(question, dict):
                    msg = f'Question {i} in {file} is not a dictionary'
                    raise TestFactoryException(msg)

                # if ref is missing, set it to 'path/file.yaml:0003'
                if 'ref' not in question:
                    question['ref'] = f'{file}:{i:04}'
                    logger.warning('Missing ref set to "%s"', question["ref"])

                # check for duplicate refs across all loaded files
                if question['ref'] in self.question_factory:
                    other = self.question_factory[question['ref']]
                    otherfile = path.join(other.question['path'],
                                          other.question['filename'])
                    msg = (f'Duplicate reference "{question["ref"]}" in files '
                           f'"{otherfile}" and "{fullpath}".')
                    raise TestFactoryException(msg)

                # make factory only for the questions used in the test
                if question['ref'] in qrefs:
                    question.setdefault('type', 'information')
                    question.update({
                        'filename': filename,
                        'path': dirname,
                        'index': i  # position in the file, 0 based
                    })
                    self.question_factory[question['ref']] = QFactory(question)

                    # trial-generate so broken questions fail at load time
                    try:
                        self.question_factory[question['ref']].generate()
                    except Exception as exc:
                        # chain the original error so the traceback shows
                        # WHY the question failed, not just which one
                        msg = f'Failed to generate "{question["ref"]}"'
                        raise TestFactoryException(msg) from exc
                    else:
                        logger.info('%4d. "%s" Ok.', counter, question["ref"])
                        counter += 1

        # every ref used by the test must have been found in some file
        qmissing = qrefs.difference(set(self.question_factory.keys()))
        if qmissing:
            raise TestFactoryException(f'Could not find questions {qmissing}.')

    # ------------------------------------------------------------------------
    def check_missing_ref(self):
        '''Test must have a `ref`'''
        if 'ref' not in self:
            raise TestFactoryException('Missing "ref" in configuration!')

    def check_missing_database(self):
        '''Test must have an existing database file'''
        if 'database' not in self:
            raise TestFactoryException('Missing "database" in configuration')
        if not path.isfile(path.expanduser(self['database'])):
            msg = f'Database "{self["database"]}" not found!'
            raise TestFactoryException(msg)

    def check_missing_answers_directory(self):
        '''Test must have an answers directory'''
        if 'answers_dir' not in self:
            msg = 'Missing "answers_dir" in configuration'
            raise TestFactoryException(msg)

    def check_answers_directory_writable(self):
        '''Answers directory must be writable (probed with a marker file)'''
        testfile = path.join(path.expanduser(self['answers_dir']), 'REMOVE-ME')
        try:
            with open(testfile, 'w') as file:
                file.write('You can safely remove this file.')
        except OSError as exc:
            # chain so the OS-level cause (permissions, missing dir) is kept
            msg = f'Cannot write answers to directory "{self["answers_dir"]}"'
            raise TestFactoryException(msg) from exc

    def check_questions_directory(self):
        '''Check if questions directory is missing or not accessible.'''
        if 'questions_dir' not in self:
            logger.warning('Missing "questions_dir". Using "%s"',
                           path.abspath(path.curdir))
            self['questions_dir'] = path.curdir
        elif not path.isdir(path.expanduser(self['questions_dir'])):
            raise TestFactoryException(f'Can\'t find questions directory '
                                       f'"{self["questions_dir"]}"')

    def check_import_files(self):
        '''Check if there are files to import (with questions)'''
        if 'files' not in self:
            msg = ('Missing "files" in configuration with the list of '
                   'question files to import!')
            raise TestFactoryException(msg)
        # a single filename is normalized to a one-element list
        if isinstance(self['files'], str):
            self['files'] = [self['files']]

    def check_question_list(self):
        '''Normalize each test question to {'ref': [list of refs]}'''
        if 'questions' not in self:
            raise TestFactoryException('Missing "questions" in configuration')
        for i, question in enumerate(self['questions']):
            # normalize question to a dict and ref to a list of references
            if isinstance(question, str):        # e.g., - some_ref
                question = {'ref': [question]}   # becomes - ref: [some_ref]
            elif isinstance(question, list):     # e.g., - [ref1, ref2]
                question = {'ref': [str(a) for a in question]}
            elif isinstance(question, dict):
                # a dict question without 'ref' used to die with a bare
                # KeyError; report it as a configuration error instead
                if 'ref' not in question:
                    msg = f'Question {i} is missing "ref" in configuration'
                    raise TestFactoryException(msg)
                if isinstance(question['ref'], str):
                    question['ref'] = [question['ref']]
            self['questions'][i] = question

    def check_missing_title(self):
        '''Warns if title is missing'''
        if not self['title']:
            logger.warning('Title is undefined!')

    def check_grade_scaling(self):
        '''Converts deprecated scale_min/scale_max into "scale: [min, max]"'''
        if 'scale_points' in self:
            msg = ('*** DEPRECATION WARNING: *** scale_points, scale_min, '
                   'scale_max were replaced by "scale: [min, max]".')
            logger.warning(msg)
            # assumes old-format configs always carry scale_min/scale_max
            # alongside scale_points — TODO confirm
            self['scale'] = [self['scale_min'], self['scale_max']]

    # ------------------------------------------------------------------------
    def sanity_checks(self):
        '''
        Checks for valid keys and sets default values.
        Also checks if some files and directories exist
        '''
        self.check_missing_ref()
        self.check_missing_database()
        self.check_missing_answers_directory()
        self.check_answers_directory_writable()
        self.check_questions_directory()
        self.check_import_files()
        self.check_question_list()
        self.check_missing_title()
        self.check_grade_scaling()

    # ------------------------------------------------------------------------
    async def generate(self, student):
        '''
        Given a student dict, e.g. {'name': 'john', 'number': 123},
        returns an instance of Test() for that particular student.
        Questions that fail to generate are skipped (logged as errors).
        Side effect: sets self['scale'] on first call if it was None.
        '''
        # make list of questions
        test = []
        qnum = 1   # track question number (only non-informative panels)
        nerr = 0   # count errors generating questions
        for qlist in self['questions']:
            # choose one question variant at random
            qref = random.choice(qlist['ref'])

            # generate instance of question
            try:
                question = await self.question_factory[qref].gen_async()
            except QuestionException:
                logger.error('Can\'t generate question "%s". Skipping.', qref)
                nerr += 1
                continue

            # default points: informative panels 0, real questions 1
            if question['type'] in ('information', 'success', 'warning',
                                    'alert'):
                question['points'] = qlist.get('points', 0.0)
            else:
                question['points'] = qlist.get('points', 1.0)
                question['number'] = qnum  # counter for non informative panels
                qnum += 1

            test.append(question)

        # setup scale
        total_points = sum(q['points'] for q in test)
        if total_points > 0:
            if self['scale'] is not None:
                # normalize question points to the configured scale span
                scale_min, scale_max = self['scale']
                for question in test:
                    question['points'] *= (scale_max - scale_min) / total_points
            else:
                self['scale'] = [0, total_points]
        else:
            logger.warning('Total points is **ZERO**.')
            if self['scale'] is None:
                self['scale'] = [0, 20]

        if nerr > 0:
            logger.error('%s errors found!', nerr)

        # these will be copied to the test instance
        inherit = {'ref', 'title', 'database', 'answers_dir',
                   'questions_dir', 'files',
                   'duration', 'autosubmit',
                   'scale', 'show_points',
                   'show_ref', 'debug', }
        # NOT INCLUDED: testfile, allow_all, review
        return Test({
            **{'student': student, 'questions': test},
            **{k: self[k] for k in inherit}})

    # ------------------------------------------------------------------------
    def __repr__(self):
        testsettings = '\n'.join(f'  {k:14s}: {v}' for k, v in self.items())
        return '{\n' + testsettings + '\n}'
# ============================================================================
class Test(dict):
    '''
    Each instance Test() is a concrete test of a single student.
    '''

    # ------------------------------------------------------------------------
    def __init__(self, d):
        '''Stores the test data `d` and marks the test as just started.'''
        super().__init__(d)
        self['start_time'] = datetime.now()
        self['finish_time'] = None
        self['state'] = 'ACTIVE'
        self['comment'] = ''

    # ------------------------------------------------------------------------
    def reset_answers(self):
        '''Clears every answer of the test.'''
        for quest in self['questions']:
            quest['answer'] = None

    # ------------------------------------------------------------------------
    def update_answers(self, ans):
        '''
        Given a dictionary ans={'ref': 'some answer'}, updates the answers
        of the test. Only the questions referred in `ans` are touched.
        '''
        questions = self['questions']
        for ref, answer in ans.items():
            questions[ref]['answer'] = answer

    # ------------------------------------------------------------------------
    async def correct(self):
        '''Corrects every answer and computes (and returns) the final grade.'''
        self['finish_time'] = datetime.now()
        self['state'] = 'FINISHED'
        accumulated = 0.0
        for quest in self['questions']:
            await quest.correct_async()
            accumulated += quest['grade'] * quest['points']
            logger.debug('Correcting %30s: %3g%%',
                         quest["ref"], quest["grade"]*100)
        # truncate to avoid a negative final grade, then shift by scale minimum
        self['grade'] = max(0.0, accumulated) + self['scale'][0]
        return self['grade']

    # ------------------------------------------------------------------------
    def giveup(self):
        '''Marks the test as QUIT; answers are not corrected, grade is 0.'''
        self['finish_time'] = datetime.now()
        self['state'] = 'QUIT'
        self['grade'] = 0.0
        logger.info('Student %s: gave up.', self["student"]["number"])
        return self['grade']