'''
TestFactory - generates tests for students.
Test - each instance is an individual test, generated for a single student.
'''


# python standard library
from os import path
import random
from datetime import datetime
import logging

# this project
from perguntations.questions import QFactory, QuestionException
from perguntations.tools import load_yaml

# Logger configuration
logger = logging.getLogger(__name__)


# ============================================================================
class TestFactoryException(Exception):
    '''exception raised in this module'''


# ============================================================================
class TestFactory(dict):
    '''
    Each instance of TestFactory() is a test generator.
    For example, if we want to serve two different tests, then we need two
    instances of TestFactory(), one for each test.
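
    Typical usage (illustrative sketch; 'demo/test.yaml' is a placeholder,
    the configuration normally comes from a test YAML file plus server
    options):

        conf = load_yaml('demo/test.yaml')  # ref, database, files, questions, ...
        conf.update({'review': False})
        factory = TestFactory(conf)
        test = await factory.generate({'name': 'john', 'number': '123'})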
    '''

    # ------------------------------------------------------------------------
    def __init__(self, conf):
        '''
        Sets the test defaults, overrides them with the given conf argument
        and runs sanity checks.
        Base questions are then loaded into a pool of question factories.
        '''

        # --- set test defaults and then use given configuration
        super().__init__({  # defaults
            'title': '',
            'show_points': True,
            'scale': None,  # or [0, 20]
            'duration': 0,  # 0=infinite
            'autosubmit': False,
            'debug': False,
            'show_ref': False
            })
        self.update(conf)

        # --- perform sanity checks and normalize the test questions
        self.sanity_checks()
        logger.info('Sanity checks PASSED.')

        # --- find refs of all questions used in the test
        qrefs = {r for qq in self['questions'] for r in qq['ref']}
        logger.info('Declared %d questions (each test uses %d).',
                    len(qrefs), len(self["questions"]))

        # --- for review, we are done. no factories needed
        if self['review']:
            logger.info('Review mode. No questions loaded. No factories.')
            return

        # --- load and build question factories
        self.question_factory = {}

        counter = 1
        for file in self["files"]:
            fullpath = path.normpath(path.join(self["questions_dir"], file))
            (dirname, filename) = path.split(fullpath)

            logger.info('Loading "%s"...', fullpath)
            questions = load_yaml(fullpath)

            for i, question in enumerate(questions):
                # make sure every question in the file is a dictionary
                if not isinstance(question, dict):
                    msg = f'Question {i} in {file} is not a dictionary'
                    raise TestFactoryException(msg)

                # if ref is missing, set it to '<file>:<index>', e.g. 'file.yaml:0003'
                if 'ref' not in question:
                    question['ref'] = f'{file}:{i:04}'
                    logger.warning('Missing ref set to "%s"', question["ref"])

                # check for duplicate refs
                if question['ref'] in self.question_factory:
                    other = self.question_factory[question['ref']]
                    otherfile = path.join(other.question['path'],
                                          other.question['filename'])
                    msg = (f'Duplicate reference "{question["ref"]}" in files '
                           f'"{otherfile}" and "{fullpath}".')
                    raise TestFactoryException(msg)

                # make factory only for the questions used in the test
                if question['ref'] in qrefs:
                    question.setdefault('type', 'information')
                    question.update({
                        'filename': filename,
                        'path': dirname,
                        'index': i            # position in the file, 0 based
                        })

                    self.question_factory[question['ref']] = QFactory(question)

                    # check if all the questions can be correctly generated
                    try:
                        self.question_factory[question['ref']].generate()
                    except Exception as exc:
                        msg = f'Failed to generate "{question["ref"]}"'
                        raise TestFactoryException(msg) from exc
                    else:
                        logger.info('%4d.  "%s" Ok.', counter, question["ref"])
                    counter += 1

        qmissing = qrefs.difference(set(self.question_factory.keys()))
        if qmissing:
            raise TestFactoryException(f'Could not find questions {qmissing}.')


    # ------------------------------------------------------------------------
    def check_missing_ref(self):
        '''Test must have a `ref`'''
        if 'ref' not in self:
            raise TestFactoryException('Missing "ref" in configuration!')

    def check_missing_database(self):
        '''Test must have a database'''
        if 'database' not in self:
            raise TestFactoryException('Missing "database" in configuration')
        if not path.isfile(path.expanduser(self['database'])):
            msg = f'Database "{self["database"]}" not found!'
            raise TestFactoryException(msg)

    def check_missing_answers_directory(self):
        '''Test must have an answers directory'''
        if 'answers_dir' not in self:
            msg = 'Missing "answers_dir" in configuration'
            raise TestFactoryException(msg)

    def check_answers_directory_writable(self):
        '''Answers directory must be writable'''
        testfile = path.join(path.expanduser(self['answers_dir']), 'REMOVE-ME')
        try:
            with open(testfile, 'w') as file:
                file.write('You can safely remove this file.')
        except OSError as exc:
            msg = f'Cannot write answers to directory "{self["answers_dir"]}"'
            raise TestFactoryException(msg) from exc

    def check_questions_directory(self):
        '''Check if questions directory is missing or not accessible.'''
        if 'questions_dir' not in self:
            logger.warning('Missing "questions_dir". Using "%s"',
                           path.abspath(path.curdir))
            self['questions_dir'] = path.curdir
        elif not path.isdir(path.expanduser(self['questions_dir'])):
            raise TestFactoryException(f'Can\'t find questions directory '
                                       f'"{self["questions_dir"]}"')

    def check_import_files(self):
        '''Check if there are files to import (with questions)'''
        if 'files' not in self:
            msg = ('Missing "files" in configuration with the list of '
                   'question files to import!')
            raise TestFactoryException(msg)

        if isinstance(self['files'], str):
            self['files'] = [self['files']]

    def check_question_list(self):
        '''Normalize the question list: each entry becomes a dict with a list of refs'''
        if 'questions' not in self:
            raise TestFactoryException('Missing "questions" in configuration')

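        # Accepted forms for each entry of 'questions' (derived from the
        # normalization below; the refs are placeholders):
        #   - some_ref              ->  {'ref': ['some_ref']}
        #   - ref: some_ref         ->  {'ref': ['some_ref']}
        #   - [ref_a, ref_b]        ->  {'ref': ['ref_a', 'ref_b']}
        #   - ref: [ref_a, ref_b]   ->  unchanged (one ref is picked per test)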
        for i, question in enumerate(self['questions']):
            # normalize question to a dict and ref to a list of references
            if isinstance(question, str):  # e.g.,    - some_ref
                question = {'ref': [question]}    # becomes  - ref: [some_ref]
            elif isinstance(question, dict) and isinstance(question['ref'], str):
                question['ref'] = [question['ref']]
            elif isinstance(question, list):
                question = {'ref': [str(a) for a in question]}

            self['questions'][i] = question

    def check_missing_title(self):
        '''Warns if title is missing'''
        if not self['title']:
            logger.warning('Title is undefined!')

    def check_grade_scaling(self):
        '''Converts deprecated scale options to the current "scale" format'''
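        # e.g. (illustrative) deprecated:  scale_min: 0, scale_max: 20
        #      replacement:                scale: [0, 20]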
        if 'scale_points' in self:
            msg = ('*** DEPRECATION WARNING: *** scale_points, scale_min, '
                   'scale_max were replaced by "scale: [min, max]".')
            logger.warning(msg)
            self['scale'] = [self['scale_min'], self['scale_max']]


    # ------------------------------------------------------------------------
    def sanity_checks(self):
        '''
        Checks for valid keys and sets default values.
        Also checks if some files and directories exist
        '''
        self.check_missing_ref()
        self.check_missing_database()
        self.check_missing_answers_directory()
        self.check_answers_directory_writable()
        self.check_questions_directory()
        self.check_import_files()
        self.check_question_list()
        self.check_missing_title()
        self.check_grade_scaling()

    # ------------------------------------------------------------------------
    async def generate(self, student):
        '''
        Given a student dict, e.g. {'name': 'john', 'number': 123},
        returns an instance of Test() generated for that particular student
        '''

        # make list of questions
        test = []
        qnum = 1     # track question number
        nerr = 0  # count errors generating questions

        for qlist in self['questions']:
            # choose one question variant
            qref = random.choice(qlist['ref'])

            # generate instance of question
            try:
                question = await self.question_factory[qref].gen_async()
            except QuestionException:
                logger.error('Can\'t generate question "%s". Skipping.', qref)
                nerr += 1
                continue

            # some defaults
            if question['type'] in ('information', 'success', 'warning',
                                    'alert'):
                question['points'] = qlist.get('points', 0.0)
            else:
                question['points'] = qlist.get('points', 1.0)
                question['number'] = qnum  # counter for non informative panels
                qnum += 1

            test.append(question)

        # setup scale
        total_points = sum(q['points'] for q in test)

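        # Example (illustrative numbers): with scale == [0, 20] and
        # total_points == 5, each question's points are multiplied by
        # (20 - 0) / 5 == 4, so the points of a full test sum to 20.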
        if total_points > 0:
            # normalize question points to scale
            if self['scale'] is not None:
                scale_min, scale_max = self['scale']
                for question in test:
                    question['points'] *= (scale_max - scale_min) / total_points
            else:
                self['scale'] = [0, total_points]
        else:
            logger.warning('Total points is **ZERO**.')
            if self['scale'] is None:
                self['scale'] = [0, 20]

        if nerr > 0:
            logger.error('%d errors found while generating questions!', nerr)

        # these will be copied to the test instance
        inherit = {'ref', 'title', 'database', 'answers_dir',
                   'questions_dir', 'files',
                   'duration', 'autosubmit',
                   'scale', 'show_points',
                   'show_ref', 'debug', }
        # NOT INCLUDED:  testfile, allow_all, review

        return Test({
            **{'student': student, 'questions': test},
            **{k:self[k] for k in inherit}})

    # ------------------------------------------------------------------------
    def __repr__(self):
        testsettings = '\n'.join(f'  {k:14s}: {v}' for k, v in self.items())
        return '{\n' + testsettings + '\n}'


# ============================================================================
class Test(dict):
    '''
    Each Test() instance is a concrete test for a single student.
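
    Typical lifecycle (illustrative; answers are keyed by the question's
    position in the test):

        test.update_answers({0: 'some answer', 1: 'another answer'})
        grade = await test.correct()    # or test.giveup()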
    '''

    # ------------------------------------------------------------------------
    def __init__(self, d):
        super().__init__(d)
        self['start_time'] = datetime.now()
        self['finish_time'] = None
        self['state'] = 'ACTIVE'
        self['comment'] = ''

    # ------------------------------------------------------------------------
    def reset_answers(self):
        '''Removes all answers from the test (clean)'''
        for question in self['questions']:
            question['answer'] = None

    # ------------------------------------------------------------------------
    def update_answers(self, ans):
        '''
        Given a dictionary mapping question indices to answers, e.g.
        ans={0: 'some answer'}, updates the answers of the test.
        Only the questions referred to in the dictionary are affected.
        '''
        for ref, answer in ans.items():
            self['questions'][ref]['answer'] = answer

    # ------------------------------------------------------------------------
    async def correct(self):
        '''Corrects all the answers of the test and computes the final grade'''
        self['finish_time'] = datetime.now()
        self['state'] = 'FINISHED'

        grade = 0.0
        for question in self['questions']:
            await question.correct_async()
            grade += question['grade'] * question['points']
            logger.debug('Correcting %30s: %3g%%',
                         question["ref"], question["grade"]*100)

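        # Example (illustrative numbers): with scale == [10, 20] the question
        # points sum to 20 - 10 == 10; assuming per-question grades in [0, 1],
        # the raw grade falls in [0, 10] and adding scale[0] == 10 yields a
        # final grade in [10, 20].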
        # truncate to avoid negative final grade and adjust scale
        self['grade'] = max(0.0, grade) + self['scale'][0]
        return self['grade']

    # ------------------------------------------------------------------------
    def giveup(self):
        '''Test is marked as QUIT and is not corrected'''
        self['finish_time'] = datetime.now()
        self['state'] = 'QUIT'
        self['grade'] = 0.0
        logger.info('Student %s:  gave up.', self["student"]["number"])
        return self['grade']