Commit 64d098e5e622762326a4c0e6b32b769e4742d4af

Authored by Miguel Barão
1 parent a08db79d
Exists in master and in 1 other branch: dev

- replace (scale_points, scale_min, scale_max) with "scale: [0, 20]"

- update demo.yaml
- change correct-question.py to count the number of correct colors
- catch exception when loading the test configuration
demo/demo.yaml
@@ -1,38 +1,44 @@
 ---
 # ============================================================================
-# The test reference should be a unique identifier. It is saved in the database
-# so that queries can be done in the terminal like
-# sqlite3 students.db "select * from tests where ref='demo'"
+# Unique identifier of the test.
+# Database queries can be done in the terminal with
+# sqlite3 students.db "select * from tests where ref='tutorial'"
 ref: tutorial
 
-# Database with student credentials and grades of all questions and tests done
-# The database is an sqlite3 file generate with the command initdb
+# Database file that includes student credentials, tests and questions grades.
+# It's a sqlite3 database generated with the command 'initdb'
 database: students.db
 
-# Directory where the tests including submitted answers and grades are stored.
-# The submitted tests and their corrections can be reviewed later.
+# Directory where the submitted and corrected tests are stored for later review.
 answers_dir: ans
 
 # --- optional settings: -----------------------------------------------------
 
-# You may wish to refer the course, year or kind of test
+# Title of this test, e.g. course name, year or test number
 # (default: '')
 title: Teste de demonstração (tutorial)
 
 # Duration in minutes.
 # (0 or undefined means infinite time)
 duration: 2
-autosubmit: true
 
-# Show points for each question, scale 0-20.
+# Automatic test submission after the timeout 'duration'?
 # (default: false)
+autosubmit: true
+
+# Show points for each question (min and max).
+# (default: true)
 show_points: true
 
-# scale final grade to the interval [scale_min, scale_max]
-# (default: scale to [0,20])
-scale_max: 20
-scale_min: 0
-scale_points: true
+# scale final grade to an interval, e.g. [0, 20], keeping the relative weight
+# of the points declared in the questions below.
+# (default: no scaling, just use question points)
+scale: [0, 5]
+
+# DEPRECATED: old version, to be removed
+# scale_max: 20
+# scale_min: 0
+# scale_points: true
 
 # ----------------------------------------------------------------------------
 # Base path applied to the questions files and all the scripts
@@ -50,7 +56,7 @@ files:
 # The order is preserved.
 # There are several ways to define each question (explained below).
 questions:
-  - tut-test
+  - ref: tut-test
  - tut-questions
 
   - tut-radio
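As an aside, the new `scale` option rescales the declared question points to the target interval while keeping their relative weights. A minimal sketch of that arithmetic (not part of the commit; the numbers are hypothetical):

```python
# Sketch of the 'scale' semantics described in demo.yaml above.
points = [1.0, 1.0, 2.0]      # hypothetical points declared in the questions
scale_min, scale_max = 0, 5   # as in 'scale: [0, 5]'

total = sum(points)           # 4.0
scaled = [p * (scale_max - scale_min) / total for p in points]
print(scaled)                 # [1.25, 1.25, 2.5] -- sums to 5.0
```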
demo/questions/correct/correct-question.py
@@ -1,24 +1,27 @@
 #!/usr/bin/env python3
 
+'''
+Demonstration of a correction script
+'''
+
 import re
 import sys
 
-msg1 = '''---
-grade: 1.0
-comments: Muito bem!
-'''
+s = sys.stdin.read()
 
-msg0 = '''
-grade: 0.0
-comments: A resposta correcta é "red green blue".
-'''
+ans = set(re.findall(r'[\w]+', s.lower()))  # get words in lowercase
+rgb = set(['red', 'green', 'blue'])         # the correct answer
 
-s = sys.stdin.read()
+# the grade is the number of correct colors minus the number of wrong ones
+grade = max(0,
            len(rgb.intersection(ans)) - len(ans.difference(rgb))) / 3
 
-answer = set(re.findall(r'[\w]+', s.lower())) # get words in lowercase
-rgb_colors = set(['red', 'green', 'blue']) # the correct answer
+if ans == rgb:
+    print('---\n'
+          'grade: 1.0\n'
+          'comments: Muito bem!')
 
-if answer == rgb_colors:
-    print(msg1)
 else:
-    print(msg0)
+    print('---\n'
+          f'grade: {grade}\n'
+          'comments: A resposta correcta é "red green blue".')
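For reference, a rough worked example (not part of the commit) of the new counting behaviour, using the same formula as the script above:

```python
import re

def grade(answer: str) -> float:
    '''same formula as correct-question.py: correct minus wrong colors,
    clipped at zero, divided by the 3 expected colors'''
    ans = set(re.findall(r'[\w]+', answer.lower()))
    rgb = set(['red', 'green', 'blue'])
    return max(0, len(rgb.intersection(ans)) - len(ans.difference(rgb))) / 3

print(grade('red green blue'))    # 1.0 (all three correct)
print(grade('red green yellow'))  # 0.333... (2 correct, 1 wrong)
print(grade('pink yellow'))       # 0.0 (clipped at zero)
```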
demo/questions/generators/generate-question.py
@@ -18,10 +18,11 @@ print(f"""---
 type: text
 title: Geradores de perguntas
 text: |
-  Existe a possibilidade da pergunta ser gerada por um programa externo.
-  Este programa deve escrever no `stdout` uma pergunta em formato `yaml` como
-  os anteriores. Pode também receber argumentos para parametrizar a geração da
-  pergunta. Aqui está um exemplo de uma pergunta gerada por um script python:
+  Existe a possibilidade da pergunta ser gerada por um programa externo. Este
+  programa deve escrever no `stdout` uma pergunta em formato `yaml` como nos
+  exemplos anteriores. Pode também receber argumentos para parametrizar a
+  geração da pergunta. Aqui está um exemplo de uma pergunta gerada por um
+  script python:
 
 ```python
 #!/usr/bin/env python3
perguntations/app.py
@@ -86,7 +86,12 @@ class App():
         self.allowed = set([])  # '0' is hardcoded to allowed elsewhere
 
         logger.info('Loading test configuration "%s".', conf["testfile"])
-        testconf = load_yaml(conf['testfile'])
+        try:
+            testconf = load_yaml(conf['testfile'])
+        except Exception as exc:
+            logger.critical('Error loading test configuration YAML.')
+            raise AppException(exc)
+
         testconf.update(conf)  # command line options override configuration
 
         # start test factory
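The new guard, sketched standalone. Assumptions: `load_yaml` behaves roughly like `yaml.safe_load` over the opened file, and `AppException` is perguntations' own error type (both names appear in the code; the bodies here are illustrative):

```python
import logging
import yaml

logger = logging.getLogger(__name__)

class AppException(Exception):
    '''stand-in for perguntations' own fatal error type'''

def load_test_config(path):
    # mirrors the new pattern in App.__init__: a missing file or malformed
    # YAML is logged and re-raised instead of escaping as a bare traceback
    try:
        with open(path) as file:
            return yaml.safe_load(file)
    except Exception as exc:
        logger.critical('Error loading test configuration YAML.')
        raise AppException(exc)
```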
perguntations/templates/grade.html
@@ -43,11 +43,11 @@
   {% if t['state'] == 'FINISHED' %}
   <h1>Resultado:
     <strong>{{ f'{round(t["grade"], 1)}' }}</strong>
-    valores na escala de {{t['scale_min']}} a {{t['scale_max']}}.
+    valores na escala de {{t['scale'][0]}} a {{t['scale'][1]}}.
   </h1>
   <p>O seu teste foi registado.<br>
     Pode fechar o browser e desligar o computador.</p>
-  {% if t['grade'] - t['scale_min'] >= 0.75*(t['scale_max'] - t['scale_min']) %}
+  {% if t['grade'] - t['scale'][0] >= 0.75*(t['scale'][1] - t['scale'][0]) %}
   <i class="fas fa-thumbs-up fa-5x text-success" aria-hidden="true"></i>
   {% end %}
   {% elif t['state'] == 'QUIT' %}
@@ -78,19 +78,19 @@
       <td> <!-- progress column -->
         <div class="progress" style="height: 20px;">
           <div class="progress-bar
-            {% if g[1] - t['scale_min'] < 0.5*(t['scale_max'] - t['scale_min']) %}
+            {% if g[1] - t['scale'][0] < 0.5*(t['scale'][1] - t['scale'][0]) %}
             bg-danger
-            {% elif g[1] - t['scale_min'] < 0.75*(t['scale_max'] - t['scale_min']) %}
+            {% elif g[1] - t['scale'][0] < 0.75*(t['scale'][1] - t['scale'][0]) %}
             bg-warning
             {% else %}
             bg-success
             {% end %}
            "
            role="progressbar"
-           aria-valuenow="{{ round(100*(g[1] - t['scale_min'])/(t['scale_max'] - t['scale_min'])) }}"
+           aria-valuenow="{{ 100*(g[1] - t['scale'][0])/(t['scale'][1] - t['scale'][0]) }}"
            aria-valuemin="0"
            aria-valuemax="100"
-           style="min-width: 2em; width: {{ round(100*(g[1]-t['scale_min'])/(t['scale_max']-t['scale_min'])) }}%;"
+           style="min-width: 2em; width: {{ 100*(g[1]-t['scale'][0])/(t['scale'][1]-t['scale'][0]) }}%;"
            >
 
            {{ str(round(g[1], 1)) }}
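The template's thresholds are easier to see with the arithmetic extracted; an illustrative sketch (not from the commit):

```python
def percent(points, scale):
    # position of 'points' within [scale[0], scale[1]], as used for the
    # progress bar width and aria-valuenow above
    return 100 * (points - scale[0]) / (scale[1] - scale[0])

print(percent(15, [0, 20]))  # 75.0 -> bg-success (>= 75%)
print(percent(12, [0, 20]))  # 60.0 -> bg-warning (>= 50%, < 75%)
print(percent(8, [0, 20]))   # 40.0 -> bg-danger  (< 50%)
```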
perguntations/test.py
@@ -42,10 +42,8 @@ class TestFactory(dict):
         # --- set test defaults and then use given configuration
         super().__init__({  # defaults
             'title': '',
-            'show_points': False,
-            'scale_points': True,
-            'scale_max': 20.0,
-            'scale_min': 0.0,
+            'show_points': True,
+            'scale': None,  # or [0, 20]
             'duration': 0,  # 0=infinite
             'autosubmit': False,
             'debug': False,
@@ -197,11 +195,12 @@ class TestFactory(dict):
 
     def check_grade_scaling(self):
         '''Just informs the scale limits'''
-        if self['scale_points']:
-            smin, smax = self["scale_min"], self["scale_max"]
-            logger.info('Grades will be scaled to [%g, %g]', smin, smax)
-        else:
-            logger.info('Grades are not being scaled.')
+        if 'scale_points' in self:
+            msg = ('*** DEPRECATION WARNING: *** scale_points, scale_min, '
+                   'scale_max were replaced by "scale: [min, max]".')
+            logger.warning(msg)
+            self['scale'] = [self['scale_min'], self['scale_max']]
+
 
     # ------------------------------------------------------------------------
     def sanity_checks(self):
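With this shim an old-style configuration keeps working; schematically (hypothetical values, shown outside the class for brevity):

```python
conf = {'scale_points': True, 'scale_min': 0, 'scale_max': 20}  # old style

# what check_grade_scaling now does, after logging the deprecation warning
if 'scale_points' in conf:
    conf['scale'] = [conf['scale_min'], conf['scale_max']]

print(conf['scale'])  # [0, 20]
```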
@@ -254,25 +253,32 @@ class TestFactory(dict):
 
             test.append(question)
 
-        # normalize question points to scale
-        if self['scale_points']:
-            total_points = sum(q['points'] for q in test)
-            if total_points == 0:
-                logger.warning('Can\'t scale, total points in the test is 0!')
-            else:
-                scale = (self['scale_max'] - self['scale_min']) / total_points
+        # setup scale
+        total_points = sum(q['points'] for q in test)
+
+        if total_points > 0:
+            # normalize question points to scale
+            if self['scale'] is not None:
+                scale_min, scale_max = self['scale']
                 for question in test:
-                    question['points'] *= scale
+                    question['points'] *= (scale_max - scale_min) / total_points
+            else:
+                self['scale'] = [0, total_points]
+        else:
+            logger.warning('Total points is **ZERO**.')
+            if self['scale'] is None:
+                self['scale'] = [0, 20]
 
         if nerr > 0:
             logger.error('%s errors found!', nerr)
 
+        # these will be copied to the test instance
         inherit = {'ref', 'title', 'database', 'answers_dir',
                    'questions_dir', 'files',
                    'duration', 'autosubmit',
-                   'scale_min', 'scale_max', 'show_points',
+                   'scale', 'show_points',
                    'show_ref', 'debug', }
-        # NOT INCLUDED: scale_points, testfile, allow_all, review
+        # NOT INCLUDED: testfile, allow_all, review
 
         return Test({
             **{'student': student, 'questions': test},
@@ -318,6 +324,7 @@ class Test(dict):
         '''Corrects all the answers of the test and computes the final grade'''
         self['finish_time'] = datetime.now()
         self['state'] = 'FINISHED'
+
         grade = 0.0
         for question in self['questions']:
             await question.correct_async()
@@ -325,8 +332,8 @@ class Test(dict):
             logger.debug('Correcting %30s: %3g%%',
                          question["ref"], question["grade"]*100)
 
-        # truncate to avoid negative grade and adjust scale
-        self['grade'] = max(0.0, grade) + self['scale_min']
+        # truncate to avoid negative final grade and adjust scale
+        self['grade'] = max(0.0, grade) + self['scale'][0]
         return self['grade']
 
     # ------------------------------------------------------------------------
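Lastly, a small worked example (not from the commit) of the final clamp-and-shift; how the per-question sum `grade` is accumulated falls outside these hunks:

```python
scale = [10, 20]  # hypothetical scale
grade = -0.5      # the summed question grades can come out negative

final = max(0.0, grade) + scale[0]
print(final)      # 10.0 -> truncated to the bottom of the scale
```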