diff --git a/demo/demo.yaml b/demo/demo.yaml index 5a1f205..7ca8ce9 100644 --- a/demo/demo.yaml +++ b/demo/demo.yaml @@ -1,38 +1,44 @@ --- # ============================================================================ -# The test reference should be a unique identifier. It is saved in the database -# so that queries can be done in the terminal like -# sqlite3 students.db "select * from tests where ref='demo'" +# Unique identifier of the test. +# Database queries can be done in the terminal with +# sqlite3 students.db "select * from tests where ref='tutorial'" ref: tutorial -# Database with student credentials and grades of all questions and tests done -# The database is an sqlite3 file generate with the command initdb +# Database file that includes student credentials, tests and question grades. +# It's a sqlite3 database generated with the command 'initdb' database: students.db -# Directory where the tests including submitted answers and grades are stored. -# The submitted tests and their corrections can be reviewed later. +# Directory where the submitted and corrected tests are stored for later review. answers_dir: ans # --- optional settings: ----------------------------------------------------- -# You may wish to refer the course, year or kind of test +# Title of this test, e.g. course name, year or test number # (default: '') title: Teste de demonstração (tutorial) # Duration in minutes. # (0 or undefined means infinite time) duration: 2 -autosubmit: true -# Show points for each question, scale 0-20. +# Automatic test submission after the timeout 'duration'? # (default: false) +autosubmit: true + +# Show points for each question (min and max). +# (default: true) show_points: true -# scale final grade to the interval [scale_min, scale_max] -# (default: scale to [0,20]) -scale_max: 20 -scale_min: 0 -scale_points: true +# scale final grade to an interval, e.g. [0, 20], keeping the relative weight +# of the points declared in the questions below. 
+# (default: no scaling, just use question points) +scale: [0, 5] + +# DEPRECATED: old version, to be removed +# scale_max: 20 +# scale_min: 0 +# scale_points: true # ---------------------------------------------------------------------------- # Base path applied to the questions files and all the scripts @@ -50,7 +56,7 @@ files: # The order is preserved. # There are several ways to define each question (explained below). questions: - - tut-test + - ref: tut-test - tut-questions - tut-radio diff --git a/demo/questions/correct/correct-question.py b/demo/questions/correct/correct-question.py index 8319a2f..df55039 100755 --- a/demo/questions/correct/correct-question.py +++ b/demo/questions/correct/correct-question.py @@ -1,24 +1,27 @@ #!/usr/bin/env python3 +''' +Demonstração de um script de correcção +''' + import re import sys -msg1 = '''--- -grade: 1.0 -comments: Muito bem! -''' +s = sys.stdin.read() -msg0 = ''' -grade: 0.0 -comments: A resposta correcta é "red green blue". -''' +ans = set(re.findall(r'[\w]+', s.lower())) # get words in lowercase +rgb = set(['red', 'green', 'blue']) # the correct answer -s = sys.stdin.read() +# a nota é o número de cores certas menos o número de erradas +grade = max(0, + len(rgb.intersection(ans)) - len(ans.difference(rgb))) / 3 -answer = set(re.findall(r'[\w]+', s.lower())) # get words in lowercase -rgb_colors = set(['red', 'green', 'blue']) # the correct answer +if ans == rgb: + print('---\n' + 'grade: 1.0\n' + 'comments: Muito bem!') -if answer == rgb_colors: - print(msg1) else: - print(msg0) + print('---\n' + f'grade: {grade}\n' + 'comments: A resposta correcta é "red green blue".') diff --git a/demo/questions/generators/generate-question.py b/demo/questions/generators/generate-question.py index 36b8082..3161ce4 100755 --- a/demo/questions/generators/generate-question.py +++ b/demo/questions/generators/generate-question.py @@ -18,10 +18,11 @@ print(f"""--- type: text title: Geradores de perguntas text: | - Existe a 
possibilidade da pergunta ser gerada por um programa externo. - Este programa deve escrever no `stdout` uma pergunta em formato `yaml` como - os anteriores. Pode também receber argumentos para parametrizar a geração da - pergunta. Aqui está um exemplo de uma pergunta gerada por um script python: + Existe a possibilidade da pergunta ser gerada por um programa externo. Este + programa deve escrever no `stdout` uma pergunta em formato `yaml` como nos + exemplos anteriores. Pode também receber argumentos para parametrizar a + geração da pergunta. Aqui está um exemplo de uma pergunta gerada por um + script python: ```python #!/usr/bin/env python3 diff --git a/perguntations/app.py b/perguntations/app.py index deeac3c..b170e03 100644 --- a/perguntations/app.py +++ b/perguntations/app.py @@ -86,7 +86,12 @@ class App(): self.allowed = set([]) # '0' is hardcoded to allowed elsewhere logger.info('Loading test configuration "%s".', conf["testfile"]) - testconf = load_yaml(conf['testfile']) + try: + testconf = load_yaml(conf['testfile']) + except Exception as exc: + logger.critical('Error loading test configuration YAML.') + raise AppException(exc) + testconf.update(conf) # command line options override configuration # start test factory diff --git a/perguntations/templates/grade.html b/perguntations/templates/grade.html index 48d6caa..eb3863f 100644 --- a/perguntations/templates/grade.html +++ b/perguntations/templates/grade.html @@ -43,11 +43,11 @@ {% if t['state'] == 'FINISHED' %}

Resultado: {{ f'{round(t["grade"], 1)}' }} - valores na escala de {{t['scale_min']}} a {{t['scale_max']}}. + valores na escala de {{t['scale'][0]}} a {{t['scale'][1]}}.

O seu teste foi registado.
Pode fechar o browser e desligar o computador.

- {% if t['grade'] - t['scale_min'] >= 0.75*(t['scale_max'] - t['scale_min']) %} + {% if t['grade'] - t['scale'][0] >= 0.75*(t['scale'][1] - t['scale'][0]) %} {% end %} {% elif t['state'] == 'QUIT' %} @@ -78,19 +78,19 @@
{{ str(round(g[1], 1)) }} diff --git a/perguntations/test.py b/perguntations/test.py index 3efba48..e2d7ed1 100644 --- a/perguntations/test.py +++ b/perguntations/test.py @@ -42,10 +42,8 @@ class TestFactory(dict): # --- set test defaults and then use given configuration super().__init__({ # defaults 'title': '', - 'show_points': False, - 'scale_points': True, - 'scale_max': 20.0, - 'scale_min': 0.0, + 'show_points': True, + 'scale': None, # or [0, 20] 'duration': 0, # 0=infinite 'autosubmit': False, 'debug': False, @@ -197,11 +195,12 @@ class TestFactory(dict): def check_grade_scaling(self): '''Just informs the scale limits''' - if self['scale_points']: - smin, smax = self["scale_min"], self["scale_max"] - logger.info('Grades will be scaled to [%g, %g]', smin, smax) - else: - logger.info('Grades are not being scaled.') + if 'scale_points' in self: + msg = ('*** DEPRECATION WARNING: *** scale_points, scale_min, ' + 'scale_max were replaced by "scale: [min, max]".') + logger.warning(msg) + self['scale'] = [self['scale_min'], self['scale_max']] + # ------------------------------------------------------------------------ def sanity_checks(self): @@ -254,25 +253,32 @@ class TestFactory(dict): test.append(question) - # normalize question points to scale - if self['scale_points']: - total_points = sum(q['points'] for q in test) - if total_points == 0: - logger.warning('Can\'t scale, total points in the test is 0!') - else: - scale = (self['scale_max'] - self['scale_min']) / total_points + # setup scale + total_points = sum(q['points'] for q in test) + + if total_points > 0: + # normalize question points to scale + if self['scale'] is not None: + scale_min, scale_max = self['scale'] for question in test: - question['points'] *= scale + question['points'] *= (scale_max - scale_min) / total_points + else: + self['scale'] = [0, total_points] + else: + logger.warning('Total points is **ZERO**.') + if self['scale'] is None: + self['scale'] = [0, 20] if nerr > 0: 
logger.error('%s errors found!', nerr) + # these will be copied to the test instance inherit = {'ref', 'title', 'database', 'answers_dir', 'questions_dir', 'files', 'duration', 'autosubmit', - 'scale_min', 'scale_max', 'show_points', + 'scale', 'show_points', 'show_ref', 'debug', } - # NOT INCLUDED: scale_points, testfile, allow_all, review + # NOT INCLUDED: testfile, allow_all, review return Test({ **{'student': student, 'questions': test}, @@ -318,6 +324,7 @@ class Test(dict): '''Corrects all the answers of the test and computes the final grade''' self['finish_time'] = datetime.now() self['state'] = 'FINISHED' + grade = 0.0 for question in self['questions']: await question.correct_async() @@ -325,8 +332,8 @@ class Test(dict): logger.debug('Correcting %30s: %3g%%', question["ref"], question["grade"]*100) - # truncate to avoid negative grade and adjust scale - self['grade'] = max(0.0, grade) + self['scale_min'] + # truncate to avoid negative final grade and adjust scale + self['grade'] = max(0.0, grade) + self['scale'][0] return self['grade'] # ------------------------------------------------------------------------ -- libgit2 0.21.2