diff --git a/demo/demo.yaml b/demo/demo.yaml index 3045671..d70cc2c 100644 --- a/demo/demo.yaml +++ b/demo/demo.yaml @@ -14,9 +14,6 @@ database: students.db # Directory where the submitted and corrected test are stored for later review. answers_dir: ans -# Server used to compile & execute code -jobe_server: 192.168.1.85 - # --- optional settings: ----------------------------------------------------- # Title of this test, e.g. course name, year or test number @@ -35,49 +32,52 @@ autosubmit: false # shown to the student. If false, the test is saved but not corrected. # No grade is shown to the student. # (default: true) -autocorrect: true +autocorrect: false # Show points for each question (min and max). # (default: true) show_points: true -# scale final grade to an interval, e.g. [0, 20], keeping the relative weight -# of the points declared in the questions below. +# Scale the points of the questions so that the final grade is in the given +# interval. # (default: no scaling, just use question points) -scale: [0, 5] +scale: [0, 20] # ---------------------------------------------------------------------------- -# Base path applied to the questions files and all the scripts -# including question generators and correctors. -# Either absolute path or relative to current directory can be used. -questions_dir: . - -# (optional) List of files containing questions in yaml format. -# Selected questions will be obtained from these files. -# If undefined, all yaml files in questions_dir are loaded (not recommended). +# Files to import. Each file contains a list of questions in yaml format. files: - questions/questions-tutorial.yaml # This is the list of questions that will make up the test. # The order is preserved. -# There are several ways to define each question (explained below). +# Each question is a dictionary whose `ref` is either a single reference or +# a list of references. If a list is given, one question is chosen at random +# for each student. The `points` key is optional and defaults to 1.0 for +# normal questions; informative "questions" default to 0.0 points. +# Points are automatically scaled if the `scale` key is defined.
questions: - ref: tut-test - - tut-questions + - ref: tut-questions + + # these will have 1.0 points + - ref: tut-radio + - ref: tut-checkbox + - ref: tut-text + - ref: tut-text-regex + - ref: tut-numeric-interval - - tut-radio - - tut-checkbox - - tut-text - - tut-text-regex - - tut-numeric-interval + # this question will have 2.0 points - ref: tut-textarea points: 2.0 - - tut-information - - tut-success - - tut-warning - - [tut-alert1, tut-alert2] - - tut-generator - - tut-yamllint - # - tut-code + # these will have 0.0 points: + - ref: tut-information + - ref: tut-success + - ref: tut-warning + + # choose one from the list: + - ref: [tut-alert1, tut-alert2] + + - ref: tut-generator + - ref: tut-yamllint diff --git a/demo/questions/questions-tutorial.yaml b/demo/questions/questions-tutorial.yaml index 4bc9897..9ac3b49 100644 --- a/demo/questions/questions-tutorial.yaml +++ b/demo/questions/questions-tutorial.yaml @@ -20,24 +20,20 @@ database: students.db # base de dados previamente criada com initdb answers_dir: ans # directório onde ficam os testes dos alunos - # opcional + # opcionais duration: 60 # duração da prova em minutos (default: inf) autosubmit: true # submissão automática (default: false) show_points: true # mostra cotação das perguntas (default: true) - scale: [0, 20] # limites inferior e superior da escala (default: [0,20]) - scale_points: true # normaliza cotações para a escala definida - jobe_server: moodle-jobe.uevora.pt # server used to compile & execute code - debug: false # mostra informação de debug no browser + scale: [0, 20] # normaliza cotações para o intervalo indicado. + # não normaliza por defeito (default: None) # -------------------------------------------------------------------------- - questions_dir: ~/topics # raíz da árvore de directórios das perguntas - # Ficheiros de perguntas a importar (relativamente a `questions_dir`) files: - tabelas.yaml - - topic_A/questions.yaml - - topic_B/part_1/questions.yaml - - topic_B/part_2/questions.yaml + - topic1/questions.yaml + - topic2/part1/questions.yaml + - topic2/part2/questions.yaml # -------------------------------------------------------------------------- # Especificação das perguntas do teste e respectivas cotações. @@ -50,13 +46,10 @@ - ref: pergunta2 points: 2.0 - # a cotação é 1.0 por defeito + # por defeito, a cotação da pergunta é 1.0 valor - ref: pergunta3 - # uma string (não dict), é interpretada como referência - - tabela-auxiliar - - # escolhe aleatoriamente uma das variantes + # escolhe aleatoriamente uma das variantes da pergunta - ref: [pergunta3a, pergunta3b] points: 0.5 @@ -96,8 +89,7 @@ text: | Quando o texto da pergunta tem várias linhas, dá jeito usar o símbolo `|` de pipe, para indicar que tudo o que estiver indentado faz parte do - texto. - É o caso desta pergunta. + texto. É o caso desta pergunta. O texto das perguntas é escrito em `markdown` e suporta fórmulas em LaTeX. @@ -105,8 +97,8 @@ #--------------------------------------------------------------------------- ``` - As chaves são usadas para construir o teste e não se podem repetir, mesmo em - ficheiros diferentes. + As chaves são usadas para construir o teste e não se podem repetir, mesmo + em ficheiros diferentes. De seguida mostram-se exemplos dos vários tipos de perguntas.
# ---------------------------------------------------------------------------- diff --git a/mypy.ini b/mypy.ini index 84e7e80..ba94ffc 100644 --- a/mypy.ini +++ b/mypy.ini @@ -1,5 +1,7 @@ [mypy] python_version = 3.9 +ignore_missing_imports = True + ; [mypy-setuptools.*] ; ignore_missing_imports = True diff --git a/perguntations/__init__.py b/perguntations/__init__.py index ed98706..026ca93 100644 --- a/perguntations/__init__.py +++ b/perguntations/__init__.py @@ -1,4 +1,4 @@ -# Copyright (C) 2021 Miguel Barão +# Copyright (C) 2022 Miguel Barão # # THE MIT License # @@ -32,10 +32,10 @@ proof of submission and for review. ''' APP_NAME = 'perguntations' -APP_VERSION = '2021.09.dev1' +APP_VERSION = '2022.01.dev1' APP_DESCRIPTION = __doc__ __author__ = 'Miguel Barão' -__copyright__ = 'Copyright 2021, Miguel Barão' +__copyright__ = 'Copyright 2022, Miguel Barão' __license__ = 'MIT license' __version__ = APP_VERSION diff --git a/perguntations/app.py b/perguntations/app.py index c7d9f90..f0de047 100644 --- a/perguntations/app.py +++ b/perguntations/app.py @@ -55,6 +55,7 @@ class App(): # ------------------------------------------------------------------------ def __init__(self, config): + self.debug = config['debug'] self._make_test_factory(config['testfile']) self._db_setup() # setup engine and load all students @@ -126,10 +127,9 @@ class App(): logger.warning('"%s" does not exist', uid) return 'nonexistent' - if uid != '0' and self._students[uid]['state'] != 'allowed': logger.warning('"%s" login not allowed', uid) - return 'not allowed' + return 'not_allowed' if hashed == '': # set password on first login await self.set_password(uid, password) diff --git a/perguntations/main.py b/perguntations/main.py index cf9c00c..3eb7bbb 100644 --- a/perguntations/main.py +++ b/perguntations/main.py @@ -20,7 +20,7 @@ from perguntations.tools import load_yaml from perguntations import APP_NAME, APP_VERSION # ---------------------------------------------------------------------------- -def parse_cmdline_arguments(): +def parse_cmdline_arguments() -> argparse.Namespace: ''' Get command line arguments ''' @@ -40,9 +40,6 @@ def parse_cmdline_arguments(): parser.add_argument('--debug', action='store_true', help='Enable debug messages') - parser.add_argument('--show-ref', - action='store_true', - help='Show question references') parser.add_argument('--review', action='store_true', help='Review mode: doesn\'t generate test') @@ -59,7 +56,6 @@ def parse_cmdline_arguments(): help='Show version information and exit') return parser.parse_args() - # ---------------------------------------------------------------------------- def get_logger_config(debug=False) -> dict: ''' @@ -120,10 +116,9 @@ def main(): # --- start application -------------------------------------------------- config = { 'testfile': args.testfile, - 'debug': args.debug, 'allow_all': args.allow_all, 'allow_list': args.allow_list, - 'show_ref': args.show_ref, + 'debug': args.debug, 'review': args.review, 'correct': args.correct, } diff --git a/perguntations/serve.py b/perguntations/serve.py index cfbbf8c..a1dfe49 100644 --- a/perguntations/serve.py +++ b/perguntations/serve.py @@ -93,6 +93,11 @@ class BaseHandler(tornado.web.RequestHandler): '''simplifies access to the application a little bit''' return self.application.testapp + # @property + # def debug(self) -> bool: + # '''check if is running in debug mode''' + # return self.application.testapp.debug + def get_current_user(self): ''' Since HTTP is stateless, a cookie is used to identify the user. 
@@ -112,7 +117,7 @@ class LoginHandler(BaseHandler): _prefix = re.compile(r'[a-z]') _error_msg = { 'wrong_password': 'Senha errada', - 'not allowed': 'Não está autorizado a fazer o teste', + 'not_allowed': 'Não está autorizado a fazer o teste', 'nonexistent': 'Número de aluno inválido' } @@ -195,7 +200,7 @@ class RootHandler(BaseHandler): test = self.testapp.get_test(uid) name = self.testapp.get_name(uid) self.render('test.html', t=test, uid=uid, name=name, md=md_to_html, - templ=self._templates) + templ=self._templates, debug=self.testapp.debug) # --- POST @tornado.web.authenticated @@ -448,8 +453,8 @@ class ReviewHandler(BaseHandler): uid = test['student'] name = self.testapp.get_name(uid) - self.render('review.html', t=test, uid=uid, name=name, - md=md_to_html, templ=self._templates) + self.render('review.html', t=test, uid=uid, name=name, md=md_to_html, + templ=self._templates, debug=self.testapp.debug) # ---------------------------------------------------------------------------- diff --git a/perguntations/templates/question-information.html b/perguntations/templates/question-information.html index 91e0498..6d86399 100644 --- a/perguntations/templates/question-information.html +++ b/perguntations/templates/question-information.html @@ -17,9 +17,9 @@ {{ md(q['text']) }} - {% if show_ref %} + {% if debug %}
file: {{ q['path'] }}/{{ q['filename'] }}
ref: {{ q['ref'] }} {% end %} - \ No newline at end of file + diff --git a/perguntations/templates/question.html b/perguntations/templates/question.html index b5ce323..3f635fc 100644 --- a/perguntations/templates/question.html +++ b/perguntations/templates/question.html @@ -29,11 +29,11 @@

- {% if show_ref %} + {% if debug %} {% end %} -{% end %} \ No newline at end of file +{% end %} diff --git a/perguntations/templates/review-question-information.html b/perguntations/templates/review-question-information.html index a17a93c..54a15f4 100644 --- a/perguntations/templates/review-question-information.html +++ b/perguntations/templates/review-question-information.html @@ -16,9 +16,9 @@
{{ md(q['text']) }}
- {% if t['show_ref'] %} + {% if debug %}
file: {{ q['path'] }}/{{ q['filename'] }}
ref: {{ q['ref'] }} {% end %} - \ No newline at end of file + diff --git a/perguntations/templates/review-question.html b/perguntations/templates/review-question.html index 2d994d1..938cb96 100644 --- a/perguntations/templates/review-question.html +++ b/perguntations/templates/review-question.html @@ -65,7 +65,7 @@ {% end %} {% end %} - {% if t['show_ref'] %} + {% if debug %}
file: {{ q['path'] }}/{{ q['filename'] }}
ref: {{ q['ref'] }} @@ -109,7 +109,7 @@ {% end %}

- {% if t['show_ref'] %} + {% if debug %}
file: {{ q['path'] }}/{{ q['filename'] }}
ref: {{ q['ref'] }} @@ -118,4 +118,4 @@ {% end %} -{% end %} \ No newline at end of file +{% end %} diff --git a/perguntations/templates/review.html b/perguntations/templates/review.html index 48e32f7..02436d6 100644 --- a/perguntations/templates/review.html +++ b/perguntations/templates/review.html @@ -113,7 +113,7 @@ {% for i, q in enumerate(t['questions']) %} - {% module Template(templ[q['type']], i=i, q=q, md=md(q['ref']), t=t) %} + {% module Template(templ[q['type']], i=i, q=q, md=md(q['ref']), t=t, debug=debug) %} {% end %} diff --git a/perguntations/templates/test.html b/perguntations/templates/test.html index 692c321..ba5f5b3 100644 --- a/perguntations/templates/test.html +++ b/perguntations/templates/test.html @@ -114,7 +114,7 @@ {% module xsrf_form_html() %} {% for i, q in enumerate(t['questions']) %} - {% module Template(templ[q['type']], i=i, q=q, md=md(q['ref']), show_ref=t['show_ref']) %} + {% module Template(templ[q['type']], i=i, q=q, md=md(q['ref']), debug=debug) %} {% end %}
diff --git a/perguntations/testfactory.py b/perguntations/testfactory.py index b4c39f4..b251e07 100644 --- a/perguntations/testfactory.py +++ b/perguntations/testfactory.py @@ -6,8 +6,9 @@ TestFactory - generates tests for students from os import path import random import logging -import re -from typing import TypedDict + +# other libraries +import schema # this project from perguntations.questions import QFactory, QuestionException, QDict @@ -17,10 +18,49 @@ from perguntations.tools import load_yaml # Logger configuration logger = logging.getLogger(__name__) -ConfigDict = TypedDict('ConfigDict', { - 'title': str - # TODO add other fields - }) +# --- test validation -------------------------------------------------------- +def check_answers_directory(ans: str) -> bool: + '''Checks if answers_dir exists and is writable''' + testfile = path.join(path.expanduser(ans), 'REMOVE-ME') + try: + with open(testfile, 'w', encoding='utf-8') as file: + file.write('You can safely remove this file.') + except OSError: + return False + return True + +def check_import_files(files: list) -> bool: + '''Checks if the question files exist''' + if not files: + return False + for file in files: + if not path.isfile(file): + return False + return True + +def normalize_question_list(questions: list) -> None: + '''convert question ref from string to list of strings''' + for question in questions: + if isinstance(question['ref'], str): + question['ref'] = [question['ref']] + +test_schema = schema.Schema({ + 'ref': schema.Regex('^[a-zA-Z0-9_-]+$'), + 'database': schema.And(str, path.isfile), + 'answers_dir': schema.And(str, check_answers_directory), + 'title': str, + schema.Optional('duration'): int, + schema.Optional('autosubmit'): bool, + schema.Optional('autocorrect'): bool, + schema.Optional('show_points'): bool, + schema.Optional('scale'): schema.And([schema.Use(float)], + lambda s: len(s) == 2), + 'files': schema.And([str], check_import_files), + 'questions': [{ + 'ref': schema.Or(str, [str]), + schema.Optional('points'): float + }] + }, ignore_extra_keys=True) # ============================================================================ class TestFactoryException(Exception): @@ -36,35 +76,31 @@ class TestFactory(dict): ''' # ------------------------------------------------------------------------ - def __init__(self, conf: ConfigDict) -> None: + def __init__(self, conf) -> None: ''' Loads configuration from yaml file, then overrides some configurations using the conf argument. Base questions are added to a pool of questions factories. ''' + test_schema.validate(conf) + # --- set test defaults and then use given configuration super().__init__({ # defaults - 'title': '', 'show_points': True, 'scale': None, 'duration': 0, # 0=infinite 'autosubmit': False, 'autocorrect': True, - # 'debug': False, # FIXME not property of a test... - 'show_ref': False, }) self.update(conf) + normalize_question_list(self['questions']) # --- for review, we are done. no factories needed # if self['review']: FIXME # logger.info('Review mode. No questions loaded.
No factories.') # return - # --- perform sanity checks and normalize the test questions - self.sanity_checks() - logger.info('Sanity checks PASSED.') - # --- find refs of all questions used in the test qrefs = {r for qq in self['questions'] for r in qq['ref']} logger.info('Declared %d questions (each test uses %d).', @@ -74,7 +110,7 @@ class TestFactory(dict): self['question_factory'] = {} for file in self["files"]: - fullpath = path.normpath(path.join(self["questions_dir"], file)) + fullpath = path.normpath(file) logger.info('Loading "%s"...', fullpath) questions = load_yaml(fullpath) # , default=[]) @@ -85,7 +121,7 @@ class TestFactory(dict): msg = f'Question {i} in {file} is not a dictionary' raise TestFactoryException(msg) - # check if ref is missing, then set to '/path/file.yaml:3' + # check if ref is missing, then set to '//file.yaml:3' if 'ref' not in question: question['ref'] = f'{file}:{i:04}' logger.warning('Missing ref set to "%s"', question["ref"]) @@ -115,103 +151,104 @@ class TestFactory(dict): # ------------------------------------------------------------------------ - def check_test_ref(self) -> None: - '''Test must have a `ref`''' - if 'ref' not in self: - raise TestFactoryException('Missing "ref" in configuration!') - if not re.match(r'^[a-zA-Z0-9_-]+$', self['ref']): - raise TestFactoryException('Test "ref" can only contain the ' - 'characters a-zA-Z0-9_-') - - def check_missing_database(self) -> None: - '''Test must have a database''' - if 'database' not in self: - raise TestFactoryException('Missing "database" in configuration') - if not path.isfile(path.expanduser(self['database'])): - msg = f'Database "{self["database"]}" not found!' - raise TestFactoryException(msg) - - def check_missing_answers_directory(self) -> None: - '''Test must have a answers directory''' - if 'answers_dir' not in self: - msg = 'Missing "answers_dir" in configuration' - raise TestFactoryException(msg) - - def check_answers_directory_writable(self) -> None: - '''Answers directory must be writable''' - testfile = path.join(path.expanduser(self['answers_dir']), 'REMOVE-ME') - try: - with open(testfile, 'w', encoding='utf-8') as file: - file.write('You can safely remove this file.') - except OSError as exc: - msg = f'Cannot write answers to directory "{self["answers_dir"]}"' - raise TestFactoryException(msg) from exc - - def check_questions_directory(self) -> None: - '''Check if questions directory is missing or not accessible.''' - if 'questions_dir' not in self: - logger.warning('Missing "questions_dir". 
Using "%s"', - path.abspath(path.curdir)) - self['questions_dir'] = path.curdir - elif not path.isdir(path.expanduser(self['questions_dir'])): - raise TestFactoryException(f'Can\'t find questions directory ' - f'"{self["questions_dir"]}"') - - def check_import_files(self) -> None: - '''Check if there are files to import (with questions)''' - if 'files' not in self: - msg = ('Missing "files" in configuration with the list of ' - 'question files to import!') - raise TestFactoryException(msg) - - if isinstance(self['files'], str): - self['files'] = [self['files']] - - def check_question_list(self) -> None: - '''normalize question list''' - if 'questions' not in self: - raise TestFactoryException('Missing "questions" in configuration') - - for i, question in enumerate(self['questions']): - # normalize question to a dict and ref to a list of references - if isinstance(question, str): # e.g., - some_ref - question = {'ref': [question]} # becomes - ref: [some_ref] - elif isinstance(question, dict) and isinstance(question['ref'], str): - question['ref'] = [question['ref']] - elif isinstance(question, list): - question = {'ref': [str(a) for a in question]} - - self['questions'][i] = question - - def check_missing_title(self) -> None: - '''Warns if title is missing''' - if not self['title']: - logger.warning('Title is undefined!') - - def check_grade_scaling(self) -> None: - '''Just informs the scale limits''' - if 'scale_points' in self: - msg = ('*** DEPRECATION WARNING: *** scale_points, scale_min, ' - 'scale_max were replaced by "scale: [min, max]".') - logger.warning(msg) - self['scale'] = [self['scale_min'], self['scale_max']] + # def check_test_ref(self) -> None: + # '''Test must have a `ref`''' + # if 'ref' not in self: + # raise TestFactoryException('Missing "ref" in configuration!') + # if not re.match(r'^[a-zA-Z0-9_-]+$', self['ref']): + # raise TestFactoryException('Test "ref" can only contain the ' + # 'characters a-zA-Z0-9_-') + + # def check_missing_database(self) -> None: + # '''Test must have a database''' + # if 'database' not in self: + # raise TestFactoryException('Missing "database" in configuration') + # if not path.isfile(path.expanduser(self['database'])): + # msg = f'Database "{self["database"]}" not found!' + # raise TestFactoryException(msg) + + # def check_missing_answers_directory(self) -> None: + # '''Test must have a answers directory''' + # if 'answers_dir' not in self: + # msg = 'Missing "answers_dir" in configuration' + # raise TestFactoryException(msg) + + # def check_answers_directory_writable(self) -> None: + # '''Answers directory must be writable''' + # testfile = path.join(path.expanduser(self['answers_dir']), 'REMOVE-ME') + # try: + # with open(testfile, 'w', encoding='utf-8') as file: + # file.write('You can safely remove this file.') + # except OSError as exc: + # msg = f'Cannot write answers to directory "{self["answers_dir"]}"' + # raise TestFactoryException(msg) from exc + + # def check_questions_directory(self) -> None: + # '''Check if questions directory is missing or not accessible.''' + # if 'questions_dir' not in self: + # logger.warning('Missing "questions_dir". 
Using "%s"', + # path.abspath(path.curdir)) + # self['questions_dir'] = path.curdir + # elif not path.isdir(path.expanduser(self['questions_dir'])): + # raise TestFactoryException(f'Can\'t find questions directory ' + # f'"{self["questions_dir"]}"') + + # def check_import_files(self) -> None: + # '''Check if there are files to import (with questions)''' + # if 'files' not in self: + # msg = ('Missing "files" in configuration with the list of ' + # 'question files to import!') + # raise TestFactoryException(msg) + + # if isinstance(self['files'], str): + # self['files'] = [self['files']] + + # def check_question_list(self) -> None: + # '''normalize question list''' + # if 'questions' not in self: + # raise TestFactoryException('Missing "questions" in configuration') + + # for i, question in enumerate(self['questions']): + # # normalize question to a dict and ref to a list of references + # if isinstance(question, str): # e.g., - some_ref + # logger.warning(f'Question "{question}" should be a dictionary') + # question = {'ref': [question]} # becomes - ref: [some_ref] + # elif isinstance(question, dict) and isinstance(question['ref'], str): + # question['ref'] = [question['ref']] + # elif isinstance(question, list): + # question = {'ref': [str(a) for a in question]} + + # self['questions'][i] = question + + # def check_missing_title(self) -> None: + # '''Warns if title is missing''' + # if not self['title']: + # logger.warning('Title is undefined!') + + # def check_grade_scaling(self) -> None: + # '''Just informs the scale limits''' + # if 'scale_points' in self: + # msg = ('*** DEPRECATION WARNING: *** scale_points, scale_min, ' + # 'scale_max were replaced by "scale: [min, max]".') + # logger.warning(msg) + # self['scale'] = [self['scale_min'], self['scale_max']] # ------------------------------------------------------------------------ - def sanity_checks(self) -> None: - ''' - Checks for valid keys and sets default values. - Also checks if some files and directories exist - ''' - self.check_test_ref() - self.check_missing_database() - self.check_missing_answers_directory() - self.check_answers_directory_writable() - self.check_questions_directory() - self.check_import_files() - self.check_question_list() - self.check_missing_title() - self.check_grade_scaling() + # def sanity_checks(self) -> None: + # ''' + # Checks for valid keys and sets default values. 
+ # Also checks if some files and directories exist + # ''' + # self.check_test_ref() + # self.check_missing_database() + # self.check_missing_answers_directory() + # self.check_answers_directory_writable() + # self.check_questions_directory() + # self.check_import_files() + # self.check_question_list() + # self.check_missing_title() + # self.check_grade_scaling() # ------------------------------------------------------------------------ def check_questions(self) -> None: @@ -230,42 +267,6 @@ class TestFactory(dict): if question['type'] == 'textarea': _runtests_textarea(qref, question) - # if 'tests_right' in question: - # for tnum, right_answer in enumerate(question['tests_right']): - # try: - # question.set_answer(right_answer) - # question.correct() - # except Exception as exc: - # msg = f'Failed to correct "{qref}"' - # raise TestFactoryException(msg) from exc - - # if question['grade'] == 1.0: - # logger.info(' test %i Ok', tnum) - # else: - # logger.error(' TEST %i IS WRONG!!!', tnum) - # elif 'tests_wrong' in question: - # for tnum, wrong_answer in enumerate(question['tests_wrong']): - # try: - # question.set_answer(wrong_answer) - # question.correct() - # except Exception as exc: - # msg = f'Failed to correct "{qref}"' - # raise TestFactoryException(msg) from exc - - # if question['grade'] < 1.0: - # logger.info(' test %i Ok', tnum) - # else: - # logger.error(' TEST %i IS WRONG!!!', tnum) - # else: - # try: - # question.set_answer('') - # question.correct() - # except Exception as exc: - # msg = f'Failed to correct "{qref}"' - # raise TestFactoryException(msg) from exc - # else: - # logger.info(' correct Ok but no tests to run') - # ------------------------------------------------------------------------ async def generate(self): ''' @@ -323,11 +324,8 @@ class TestFactory(dict): logger.error('%s errors found!', nerr) # copy these from the test configuratoin to each test instance - inherit = {'ref', 'title', 'database', 'answers_dir', - 'questions_dir', 'files', - 'duration', 'autosubmit', 'autocorrect', - 'scale', 'show_points', 'show_ref'} - # NOT INCLUDED: testfile, allow_all, review, debug + inherit = ['ref', 'title', 'database', 'answers_dir', 'files', 'scale', + 'duration', 'autosubmit', 'autocorrect', 'show_points'] return Test({'questions': questions, **{k:self[k] for k in inherit}}) -- libgit2 0.21.2
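For reference, here is a minimal sketch of how the new schema-based validation introduced in `perguntations/testfactory.py` can be exercised against a configuration shaped like the updated `demo/demo.yaml` (no `jobe_server`, no `questions_dir`, and `questions` given as dictionaries with a `ref`). It assumes this patch is applied, the `perguntations` package is importable, and the third-party `schema` package is installed; all file names and question refs below are made up for illustration.

```python
# Sketch only (not part of the patch): validate a hand-built config dict with
# the module-level `test_schema` added by this change.
import tempfile
from os import path

import schema                                   # pip install schema

from perguntations.testfactory import test_schema

with tempfile.TemporaryDirectory() as tmp:
    # create the files the schema checks for (database and question file)
    database = path.join(tmp, 'students.db')
    qfile = path.join(tmp, 'questions-tutorial.yaml')
    for fname in (database, qfile):
        open(fname, 'w', encoding='utf-8').close()

    conf = {
        'ref': 'demo',                          # must match ^[a-zA-Z0-9_-]+$
        'title': 'Demo test',
        'database': database,                   # must be an existing file
        'answers_dir': tmp,                     # must exist and be writable
        'files': [qfile],                       # every entry must exist
        'scale': [0, 20],                       # optional, coerced to floats
        'questions': [
            {'ref': 'tut-radio'},               # plain ref, 1.0 points later
            {'ref': 'tut-textarea', 'points': 2.0},
            {'ref': ['tut-alert1', 'tut-alert2']},  # one picked per student
        ],
    }

    try:
        test_schema.validate(conf)              # raises on an invalid config
        print('configuration accepted')
    except schema.SchemaError as exc:
        print('configuration rejected:', exc)
```

Note that the schema only checks the shape of the configuration and the filesystem preconditions (database, answers directory, question files); defaults such as `show_points` and the normalization of `ref` into a list are still applied afterwards in `TestFactory.__init__`.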