From 93c4dec938ed9d6f4e4cc468adc246034e2b7123 Mon Sep 17 00:00:00 2001
From: Miguel Barão
Date: Wed, 9 Dec 2020 16:57:24 +0000
Subject: [PATCH] - trying to implement a --correct option to correct
 previously submitted tests (NOT YET FUNCTIONAL)
 - removed the code question type, since it can be done in a textarea using
 the jobe_submit module, which is more flexible

---
 BUGS.md                                |   3 +
 demo/demo.yaml                         |   9 +-
 demo/questions/questions-tutorial.yaml |  96 ++++++------
 perguntations/app.py                   | 100 +++++++-----
 perguntations/main.py                  |   8 +-
 perguntations/models.py                |   2 +-
 perguntations/questions.py             | 227 +++++++++++++--------------
 perguntations/serve.py                 |  19 +--
 perguntations/templates/grade.html     |  60 ++-----
 perguntations/test.py                  |  50 ++++--
 perguntations/testfactory.py           |   3 +-
 11 files changed, 319 insertions(+), 258 deletions(-)

diff --git a/BUGS.md b/BUGS.md
index dddc10d..cc4fe1d 100644
--- a/BUGS.md
+++ b/BUGS.md
@@ -1,9 +1,12 @@
 # BUGS

+- cookies: there is a perguntations_user and a user cookie. Where does user come from?
+- images are not being shown?? internal server error?
 - JOBE correct async
   - it is correcting JOBE code even when nothing was answered???
 - QuestionCode: the various errors that can occur (timeout, etc) are still not reported in the comments
+
 - sometimes the database saves the same test twice. Check whether two submits give rise to two corrections. Maybe the test key in the database should be an id unique to that particular test (not an auto counter, nor the test ref)
 - if the submission times out (e.g. JOBE or a script does not respond), the correction never finishes and the test is not saved.
diff --git a/demo/demo.yaml b/demo/demo.yaml
index b741b34..2f73919 100644
--- a/demo/demo.yaml
+++ b/demo/demo.yaml
@@ -31,6 +31,12 @@ duration: 20
 # (default: false)
 autosubmit: true

+# If true, the test is corrected on submission, the grade is calculated and
+# shown to the student. If false, the test is saved but not corrected and
+# no grade is shown to the student.
+# (default: true)
+autocorrect: false
+
 # Show points for each question (min and max).
 # (default: true)
 show_points: true
@@ -74,5 +80,4 @@ questions:
   - [tut-alert1, tut-alert2]
   - tut-generator
   - tut-yamllint
-  - tut-code
-
+  # - tut-code
diff --git a/demo/questions/questions-tutorial.yaml b/demo/questions/questions-tutorial.yaml
index 06c8499..a2bfbbc 100644
--- a/demo/questions/questions-tutorial.yaml
+++ b/demo/questions/questions-tutorial.yaml
@@ -576,7 +576,7 @@
 # ----------------------------------------------------------------------------
 - type: information
   text: |
-    This question is not included in the test and will not shown up.
+    This question is not included in the test and will not show up.
     It also lacks a "ref" and is automatically named
    `questions/questions-tutorial.yaml:0013`.
    A warning is shown on the console about this.
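The `autocorrect` option added to demo.yaml above pairs with the `--correct` flag added to main.py further down: with `autocorrect: false` a submitted test is only stored with state SUBMITTED, and a later run with `--correct` is meant to load those tests back and grade them. A minimal sketch of the intended workflow; the file name and the `perguntations` command name are illustrative, not taken from this patch:

```yaml
# exam.yaml (hypothetical test configuration)
autosubmit: true      # submit automatically when time runs out
autocorrect: false    # store answers as SUBMITTED, grade later

# after the exam, grade the stored submissions with something like:
#   perguntations --correct exam.yaml
```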
@@ -612,50 +612,50 @@
    ```

 # ----------------------------------------------------------------------------
-- type: code
-  ref: tut-code
-  title: Submissão de código (JOBE)
-  text: |
-    É possível enviar código para ser compilado e executado por um servidor
-    JOBE instalado separadamente, ver [JOBE](https://github.com/trampgeek/jobe).
-
-    ```yaml
-    - type: code
-      ref: tut-code
-      title: Submissão de código (JOBE)
-      text: |
-        Escreva um programa em C que recebe uma string no standard input e
-        mostra a mensagem `hello ` seguida da string.
-        Por exemplo, se o input for `Maria`, o output deverá ser `hello Maria`.
-      language: c
-      correct:
-        - stdin: 'Maria'
-          stdout: 'hello Maria'
-        - stdin: 'xyz'
-          stdout: 'hello xyz'
-    ```
-
-    Existem várias linguagens suportadas pelo servidor JOBE (C, C++, Java,
-    Python2, Python3, Octave, Pascal, PHP).
-    O campo `correct` deverá ser uma lista de casos a testar.
-    Se um caso incluir `stdin`, este será enviado para o programa e o `stdout`
-    obtido será comparado com o declarado. A pergunta é considerada correcta se
-    todos os outputs coincidirem.
-
-    Por defeito é usado o servidor JOBE declarado no teste. Para usar outro
-    diferente nesta pergunta usa-se a opção `server: 127.0.0.1` com o endereço
-    apropriado.
-  answer: |
-    #include <stdio.h>
-    int main() {
-        char name[20];
-        scanf("%s", name);
-        printf("hello %s", name);
-    }
-  # server: 192.168.1.85
-  language: c
-  correct:
-    - stdin: 'Maria'
-      stdout: 'hello Maria'
-    - stdin: 'xyz'
-      stdout: 'hello xyz'
+# - type: code
+#   ref: tut-code
+#   title: Submissão de código (JOBE)
+#   text: |
+#     É possível enviar código para ser compilado e executado por um servidor
+#     JOBE instalado separadamente, ver [JOBE](https://github.com/trampgeek/jobe).
+#
+#     ```yaml
+#     - type: code
+#       ref: tut-code
+#       title: Submissão de código (JOBE)
+#       text: |
+#         Escreva um programa em C que recebe uma string no standard input e
+#         mostra a mensagem `hello ` seguida da string.
+#         Por exemplo, se o input for `Maria`, o output deverá ser `hello Maria`.
+#       language: c
+#       correct:
+#         - stdin: 'Maria'
+#           stdout: 'hello Maria'
+#         - stdin: 'xyz'
+#           stdout: 'hello xyz'
+#     ```
+#
+#     Existem várias linguagens suportadas pelo servidor JOBE (C, C++, Java,
+#     Python2, Python3, Octave, Pascal, PHP).
+#     O campo `correct` deverá ser uma lista de casos a testar.
+#     Se um caso incluir `stdin`, este será enviado para o programa e o `stdout`
+#     obtido será comparado com o declarado. A pergunta é considerada correcta se
+#     todos os outputs coincidirem.
+#
+#     Por defeito é usado o servidor JOBE declarado no teste. Para usar outro
+#     diferente nesta pergunta usa-se a opção `server: 127.0.0.1` com o endereço
+#     apropriado.
+#   answer: |
+#     #include <stdio.h>
+#     int main() {
+#         char name[20];
+#         scanf("%s", name);
+#         printf("hello %s", name);
+#     }
+#   # server: 192.168.1.85
+#   language: c
+#   correct:
+#     - stdin: 'Maria'
+#       stdout: 'hello Maria'
+#     - stdin: 'xyz'
+#       stdout: 'hello xyz'
diff --git a/perguntations/app.py b/perguntations/app.py
index dc9b5a3..fbc55ac 100644
--- a/perguntations/app.py
+++ b/perguntations/app.py
@@ -21,6 +21,8 @@ from sqlalchemy.orm import sessionmaker
 from perguntations.models import Student, Test, Question
 from perguntations.tools import load_yaml
 from perguntations.testfactory import TestFactory, TestFactoryException
+import perguntations.test
+from perguntations.questions import QuestionFrom

 logger = logging.getLogger(__name__)

@@ -33,12 +35,12 @@ class AppException(Exception):
 # ============================================================================
 # helper functions
 # ============================================================================
-async def check_password(try_pw, password):
+async def check_password(try_pw, hashed_pw):
     '''check password in executor'''
     try_pw = try_pw.encode('utf-8')
     loop = asyncio.get_running_loop()
-    hashed = await loop.run_in_executor(None, bcrypt.hashpw, try_pw, password)
-    return password == hashed
+    hashed = await loop.run_in_executor(None, bcrypt.hashpw, try_pw, hashed_pw)
+    return hashed_pw == hashed


 async def hash_password(password):
@@ -121,6 +123,39 @@ class App():
         else:
             logger.info('No tests were generated.')

+        if conf['correct']:
+            self._correct_tests()
+
+    # ------------------------------------------------------------------------
+    def _correct_tests(self):
+        '''grade previously SUBMITTED tests of this test ref (work in progress)'''
+        with self._db_session() as sess:
+            filenames = sess.query(Test.filename)\
+                .filter(Test.ref == self.testfactory['ref'])\
+                .filter(Test.state == "SUBMITTED")\
+                .all()
+        logger.info('Correcting %d tests...', len(filenames))
+
+        for filename, in filenames:
+            try:
+                with open(filename) as file:
+                    testdict = json.load(file)
+            except FileNotFoundError:
+                logger.error('File not found: %s', filename)
+                continue
+
+            # rebuild Test and Question instances from the saved JSON
+            test = perguntations.test.Test(testdict)
+            test['questions'] = [QuestionFrom(q) for q in test['questions']]
+            test.correct()
+            logger.info('Student %s: grade = %f',
+                        test['student']['number'], test['grade'])
+
+            # FIXME update JSON and database
+
     # ------------------------------------------------------------------------
     async def login(self, uid, try_pw, headers=None):
         '''login authentication'''
@@ -129,15 +164,15 @@ class App():
             return 'unauthorized'

         with self._db_session() as sess:
-            name, password = sess.query(Student.name, Student.password)\
+            name, hashed_pw = sess.query(Student.name, Student.password)\
                 .filter_by(id=uid)\
                 .one()

-        if password == '':  # update password on first login
+        if hashed_pw == '':  # update password on first login
             await self.update_student_password(uid, try_pw)
             pw_ok = True
         else:  # check password
-            pw_ok = await check_password(try_pw, password)  # async bcrypt
+            pw_ok = await check_password(try_pw, hashed_pw)  # async bcrypt

         if not pw_ok:  # wrong password
             logger.info('"%s" wrong password.', uid)
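The rename from `password` to `hashed_pw` in `check_password` above makes the bcrypt round trip easier to follow: a bcrypt hash embeds its own salt and cost factor, so hashing the attempted password with the stored hash as the salt argument must reproduce the stored hash exactly. A standalone sketch of the same check (synchronous, without the executor):

```python
import bcrypt

stored = bcrypt.hashpw(b'correct horse', bcrypt.gensalt())

def check(attempt: bytes, stored_hash: bytes) -> bool:
    # bcrypt.hashpw() reads the salt and cost from stored_hash, so equal
    # output means the attempt matches. bcrypt.checkpw() performs the same
    # comparison in constant time and would also work here.
    return bcrypt.hashpw(attempt, stored_hash) == stored_hash

assert check(b'correct horse', stored)
assert not check(b'wrong password', stored)
```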
@@ -216,6 +251,7 @@ class App():
         '''get test from online student or raise exception'''
         return self.online[uid]['test']

+    # ------------------------------------------------------------------------
     async def _new_test(self, uid):
         '''
         assign a test to a given student.
         if there are pregenerated tests then
@@ -233,13 +269,13 @@ class App():
         else:
             logger.info('"%s" using a pregenerated test.', uid)

-        test.register(student)              # student signs the test
+        test.start(student)                 # student signs the test
         self.online[uid]['test'] = test

     # ------------------------------------------------------------------------
-    async def correct_test(self, uid, ans):
+    async def submit_test(self, uid, ans):
         '''
-        Corrects test
+        Handles test submission and correction.
         ans is a dictionary {question_index: answer, ...} with the answers
         for the complete test. For example: {0:'hello', 1:[1,2]}
         '''
         test = self.online[uid]['test']

         # --- submit answers and correct test
-        test.update_answers(ans)
+        test.submit(ans)
         logger.info('"%s" submitted %d answers.', uid, len(ans))

-        grade = await test.correct()
-        logger.info('"%s" grade = %g points.', uid, grade)
+        if test['autocorrect']:
+            await test.correct_async()
+            logger.info('"%s" grade = %g points.', uid, test['grade'])

         # --- save test in JSON format
         fields = (uid, test['ref'], str(test['finish_time']))
         fname = '--'.join(fields) + '.json'
         fpath = path.join(test['answers_dir'], fname)
         with open(path.expanduser(fpath), 'w') as file:
-            json.dump(test, file, indent=2, default=str)
-            # option default=str is required for datetime objects
-
+            json.dump(test, file, indent=2, default=str)  # str for datetime
         logger.info('"%s" saved JSON.', uid)

-        # --- insert test and questions into database
+        # --- insert test and questions into the database
+        # only corrected questions are added
         test_row = Test(
             ref=test['ref'],
             title=test['title'],
             grade=test['grade'],
             state=test['state'],
-            comment='',
+            comment=test['comment'],
             starttime=str(test['start_time']),
             finishtime=str(test['finish_time']),
             filename=fpath,
             student_id=uid)
-        test_row.questions = [Question(
-            number=n,
-            ref=q['ref'],
-            grade=q['grade'],
-            comment=q.get('comment', ''),
-            starttime=str(test['start_time']),
-            finishtime=str(test['finish_time']),
-            test_id=test['ref'])
-            for n, q in enumerate(test['questions'])
-            if 'grade' in q
+
+        test_row.questions = [
+            Question(
+                number=n,
+                ref=q['ref'],
+                grade=q['grade'],
+                comment=q.get('comment', ''),
+                starttime=str(test['start_time']),
+                finishtime=str(test['finish_time']),
+                test_id=test['ref']
+            )
+            for n, q in enumerate(test['questions'])
+            if 'grade' in q
         ]
+
         with self._db_session() as sess:
             sess.add(test_row)

         logger.info('"%s" database updated.', uid)
-        return grade
+
+    # ------------------------------------------------------------------------
+    def get_student_grade(self, uid):
+        '''grade of an online student's test (None until it is corrected)'''
+        return self.online[uid]['test'].get('grade', None)

     # ------------------------------------------------------------------------
     # def giveup_test(self, uid):
diff --git a/perguntations/main.py b/perguntations/main.py
index 8a7c813..444b6e2 100644
--- a/perguntations/main.py
+++ b/perguntations/main.py
@@ -49,6 +49,9 @@ def parse_cmdline_arguments():
     parser.add_argument('--review',
                         action='store_true',
                         help='Review mode: doesn\'t generate test')
+    parser.add_argument('--correct',
+                        action='store_true',
+                        help='Correct tests and update JSON files and database')
     parser.add_argument('--port',
                         type=int,
                         default=8443,
@@ -123,11 +126,12 @@ def main():
     # --- start application --------------------------------------------------
     config = {
         'testfile': args.testfile,
-        'debug': args.debug,
+        'debug': args.debug,
         'allow_all': args.allow_all,
         'allow_list': args.allow_list,
         'show_ref': args.show_ref,
-        'review': args.review,
+        'review': args.review,
+        'correct': args.correct,
     }

     try:
diff --git a/perguntations/models.py b/perguntations/models.py
index 87a308c..fd2fbfc 100644
--- a/perguntations/models.py
+++ b/perguntations/models.py
@@ -41,7 +41,7 @@ class Test(Base):
     ref = Column(String)
     title = Column(String)
     grade = Column(Float)
-    state = Column(String)  # ACTIVE, FINISHED, QUIT, NULL
+    state = Column(String)  # ACTIVE, SUBMITTED, CORRECTED, QUIT, NULL
     comment = Column(String)
     starttime = Column(String)
     finishtime = Column(String)
diff --git a/perguntations/questions.py b/perguntations/questions.py
index 4ec0f79..fc585df 100644
--- a/perguntations/questions.py
+++ b/perguntations/questions.py
@@ -32,6 +32,44 @@ QDict = NewType('QDict', Dict[str, Any])
 class QuestionException(Exception):
     '''Exceptions raised in this module'''

+# FIXME if this works, use it below
+def QuestionFrom(question: dict):
+    '''instantiate the Question subclass matching question["type"]'''
+    types = {
+        'radio': QuestionRadio,
+        'checkbox': QuestionCheckbox,
+        'text': QuestionText,
+        'text-regex': QuestionTextRegex,
+        'numeric-interval': QuestionNumericInterval,
+        'textarea': QuestionTextArea,
+        # 'code': QuestionCode,
+        # -- informative panels --
+        'information': QuestionInformation,
+        'success': QuestionInformation,
+        'warning': QuestionInformation,
+        'alert': QuestionInformation,
+    }
+
+    # Get class for this question type
+    try:
+        qclass = types[question['type']]
+    except KeyError:
+        logger.error('Invalid type "%s" in "%s"',
+                     question['type'], question['ref'])
+        raise
+
+    # Finally create an instance of Question()
+    try:
+        qinstance = qclass(QDict(question))
+    except QuestionException:
+        logger.error('Error generating question "%s". See "%s/%s"',
+                     question['ref'],
+                     question['path'],
+                     question['filename'])
+        raise
+
+    return qinstance
+

 # ============================================================================
 # Questions derived from Question are already instantiated and ready to be
@@ -590,101 +628,101 @@ class QuestionTextArea(Question):

 # ============================================================================
-class QuestionCode(Question):
-    '''
-    Submits answer to a JOBE server to compile and run against the test cases.
-    '''
-
-    _outcomes = {
-        0: 'JOBE outcome: Successful run',
-        11: 'JOBE outcome: Compile error',
-        12: 'JOBE outcome: Runtime error',
-        13: 'JOBE outcome: Time limit exceeded',
-        15: 'JOBE outcome: Successful run',
-        17: 'JOBE outcome: Memory limit exceeded',
-        19: 'JOBE outcome: Illegal system call',
-        20: 'JOBE outcome: Internal error, please report',
-        21: 'JOBE outcome: Server overload',
-    }
-
-    # ------------------------------------------------------------------------
-    def __init__(self, q: QDict) -> None:
-        super().__init__(q)
-
-        self.set_defaults(QDict({
-            'text': '',
-            'timeout': 5,           # seconds
-            'server': '127.0.0.1',  # JOBE server
-            'language': 'c',
-            'correct': [{'stdin': '', 'stdout': '', 'stderr': '', 'args': ''}],
-        }))
-
-    # ------------------------------------------------------------------------
-    def correct(self) -> None:
-        super().correct()
-
-        if self['answer'] is None:
-            return
-
-        # submit answer to JOBE server
-        resource = '/jobe/index.php/restapi/runs/'
-        headers = {"Content-type": "application/json; charset=utf-8",
-                   "Accept": "application/json"}
-
-        for expected in self['correct']:
-            data_json = json.dumps({
-                'run_spec' : {
-                    'language_id': self['language'],
-                    'sourcecode': self['answer'],
-                    'input': expected.get('stdin', ''),
-                },
-            })
-
-            try:
-                connect = http.client.HTTPConnection(self['server'])
-                connect.request(
-                    method='POST',
-                    url=resource,
-                    body=data_json,
-                    headers=headers
-                )
-                response = connect.getresponse()
-                logger.debug('JOBE response status %d', response.status)
-                if response.status != 204:
-                    content = response.read().decode('utf8')
-                    if content:
-                        result = json.loads(content)
-                connect.close()
-
-            except (HTTPError, ValueError):
-                logger.error('HTTPError while connecting to JOBE server')
-
-            try:
-                outcome = result['outcome']
-            except (NameError, TypeError, KeyError):
-                logger.error('Bad result returned from JOBE server: %s', result)
-                return
-            logger.debug(self._outcomes[outcome])
-
-            if result['cmpinfo']:   # compiler errors and warnings
-                self['comments'] = f'Erros de compilação:\n{result["cmpinfo"]}'
-                self['grade'] = 0.0
-                return
-
-            if result['stdout'] != expected.get('stdout', ''):
-                self['comments'] = 'O output gerado é diferente do esperado.'  # FIXME mostrar porque?
-                self['grade'] = 0.0
-                return
-
-        self['comments'] = 'Ok!'
-        self['grade'] = 1.0
-
+# class QuestionCode(Question):
+#     '''
+#     Submits answer to a JOBE server to compile and run against the test cases.
+#     '''
+#
+#     _outcomes = {
+#         0: 'JOBE outcome: Successful run',
+#         11: 'JOBE outcome: Compile error',
+#         12: 'JOBE outcome: Runtime error',
+#         13: 'JOBE outcome: Time limit exceeded',
+#         15: 'JOBE outcome: Successful run',
+#         17: 'JOBE outcome: Memory limit exceeded',
+#         19: 'JOBE outcome: Illegal system call',
+#         20: 'JOBE outcome: Internal error, please report',
+#         21: 'JOBE outcome: Server overload',
+#     }
+#
+#     # ------------------------------------------------------------------------
+#     def __init__(self, q: QDict) -> None:
+#         super().__init__(q)
+#
+#         self.set_defaults(QDict({
+#             'text': '',
+#             'timeout': 5,           # seconds
+#             'server': '127.0.0.1',  # JOBE server
+#             'language': 'c',
+#             'correct': [{'stdin': '', 'stdout': '', 'stderr': '', 'args': ''}],
+#         }))

     # ------------------------------------------------------------------------
     # def correct(self) -> None:
     #     super().correct()

     #     if self['answer'] is None:
     #         return

     #     # submit answer to JOBE server
     #     resource = '/jobe/index.php/restapi/runs/'
     #     headers = {"Content-type": "application/json; charset=utf-8",
     #                "Accept": "application/json"}

     #     for expected in self['correct']:
     #         data_json = json.dumps({
     #             'run_spec' : {
     #                 'language_id': self['language'],
     #                 'sourcecode': self['answer'],
     #                 'input': expected.get('stdin', ''),
     #             },
     #         })

     #         try:
     #             connect = http.client.HTTPConnection(self['server'])
     #             connect.request(
     #                 method='POST',
     #                 url=resource,
     #                 body=data_json,
     #                 headers=headers
     #             )
     #             response = connect.getresponse()
     #             logger.debug('JOBE response status %d', response.status)
     #             if response.status != 204:
     #                 content = response.read().decode('utf8')
     #                 if content:
     #                     result = json.loads(content)
     #             connect.close()

     #         except (HTTPError, ValueError):
     #             logger.error('HTTPError while connecting to JOBE server')

     #         try:
     #             outcome = result['outcome']
     #         except (NameError, TypeError, KeyError):
     #             logger.error('Bad result returned from JOBE server: %s', result)
     #             return
     #         logger.debug(self._outcomes[outcome])

     #         if result['cmpinfo']:   # compiler errors and warnings
     #             self['comments'] = f'Erros de compilação:\n{result["cmpinfo"]}'
     #             self['grade'] = 0.0
     #             return

     #         if result['stdout'] != expected.get('stdout', ''):
     #             self['comments'] = 'O output gerado é diferente do esperado.'  # FIXME mostrar porque?
     #             self['grade'] = 0.0
     #             return

     #     self['comments'] = 'Ok!'
     #     self['grade'] = 1.0

     # # ------------------------------------------------------------------------
     # async def correct_async(self) -> None:
     #     self.correct()

     # FIXME there is no async correction!!!
     # out = run_script(

@@ -731,7 +769,6 @@ class QuestionInformation(Question):
         super().correct()
         self['grade'] = 1.0    # always "correct" but points should be zero!

-
 # ============================================================================
 class QFactory():
     '''
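Even disabled, the commented-out `correct()` above still documents the JOBE protocol that any replacement (e.g. the `jobe_submit` module mentioned in the commit message) has to speak: one POST per test case to `/jobe/index.php/restapi/runs/` with a `run_spec` object. A minimal standalone sketch of that exchange, with the server address and program as placeholders; per the `_outcomes` table above, outcome 15 is a successful run:

```python
import http.client
import json

def jobe_run(server: str, language: str, sourcecode: str, stdin: str = '') -> dict:
    '''POST a single run_spec to a JOBE server and return the decoded result.'''
    body = json.dumps({'run_spec': {
        'language_id': language,        # e.g. 'c', 'python3'
        'sourcecode': sourcecode,
        'input': stdin,                 # sent to the program's stdin
    }})
    conn = http.client.HTTPConnection(server)   # e.g. '127.0.0.1'
    conn.request('POST', '/jobe/index.php/restapi/runs/', body,
                 {'Content-type': 'application/json; charset=utf-8',
                  'Accept': 'application/json'})
    result = json.loads(conn.getresponse().read().decode('utf8'))
    conn.close()
    return result

# hypothetical usage: compare result['stdout'] against the expected output
# result = jobe_run('127.0.0.1', 'c',
#                   '#include <stdio.h>\nint main() { printf("hi"); }')
# print(result['outcome'], result.get('cmpinfo'), result.get('stdout'))
```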
@@ -774,7 +811,7 @@ class QFactory():
         'text-regex': QuestionTextRegex,
         'numeric-interval': QuestionNumericInterval,
         'textarea': QuestionTextArea,
-        'code': QuestionCode,
+        # 'code': QuestionCode,
         # -- informative panels --
         'information': QuestionInformation,
         'success': QuestionInformation,
diff --git a/perguntations/serve.py b/perguntations/serve.py
index 26e933c..d4b843d 100644
--- a/perguntations/serve.py
+++ b/perguntations/serve.py
@@ -187,12 +187,12 @@ class LoginHandler(BaseHandler):

         error = await self.testapp.login(uid, password, headers)

-        if error is None:
-            self.set_secure_cookie('perguntations_user', str(uid))
-            self.redirect('/')
-        else:
+        if error:
             await asyncio.sleep(3)  # delay to avoid spamming the server...
             self.render('login.html', error=self._error_msg[error])
+        else:
+            self.set_secure_cookie('perguntations_user', str(uid))
+            self.redirect('/')


 # ----------------------------------------------------------------------------
@@ -293,18 +293,19 @@ class RootHandler(BaseHandler):
                         'numeric-interval', 'code'):
                     ans[i] = ans[i][0]

-            # correct answered questions and logout
-            await self.testapp.correct_test(uid, ans)
+            # submit answered questions; correct them if autocorrect is enabled
+            await self.testapp.submit_test(uid, ans)

             # show final grade and grades of other tests in the database
-            allgrades = self.testapp.get_student_grades_from_all_tests(uid)
+            # allgrades = self.testapp.get_student_grades_from_all_tests(uid)
+            grade = self.testapp.get_student_grade(uid)
+            self.render('grade.html', t=test)

             self.clear_cookie('perguntations_user')
-            self.render('grade.html', t=test, allgrades=allgrades)
             self.testapp.logout(uid)

             timeit_finish = timer()
-            logging.info(' correction took %fs', timeit_finish-timeit_start)
+            logging.info(' elapsed time: %fs', timeit_finish-timeit_start)


# ----------------------------------------------------------------------------
diff --git a/perguntations/templates/grade.html b/perguntations/templates/grade.html
index 3137d07..351ea20 100644
--- a/perguntations/templates/grade.html
+++ b/perguntations/templates/grade.html
@@ -41,67 +41,21 @@
 {% if t['state'] == 'FINISHED' %}
-      Resultado:
+      Resultado:
       {{ f'{round(t["grade"], 3)}' }}
-      valores na escala de {{t['scale'][0]}} a {{t['scale'][1]}}.
-
-      O seu teste foi correctamente entregue e a nota registada.
-
-      Clique aqui para sair do teste
+      valores na escala [{{t['scale'][0]}},{{t['scale'][1]}}].
+      {% if t['grade'] - t['scale'][0] >= 0.75*(t['scale'][1] - t['scale'][0]) %}
+      {% end %}
+{% elif t['state'] == 'SUBMITTED' %}
+      A prova foi submetida com sucesso. Vai ser corrigida mais tarde.
 {% elif t['state'] == 'QUIT' %}
-      Foi registada a sua desistência da prova.
+      Foi registada a sua desistência da prova.
 {% end %}
+
+      Clique aqui para terminar
-
-      Histórico de resultados
-
-      Prova | Data | Hora | Nota
-      {% for g in allgrades %}
-      {{g[0]}} | {{g[2][:10]}} | {{g[2][11:19]}} | {{ str(round(g[1], 1)) }}
-      {% end %}
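The new conditional in the FINISHED branch compares the grade against the top quarter of the scale instead of a hard-coded value, so it works for any grading scale. A quick check of the arithmetic behind `t['grade'] - t['scale'][0] >= 0.75*(t['scale'][1] - t['scale'][0])`:

```python
def top_quarter(grade: float, scale: tuple) -> bool:
    # mirrors the template condition: the grade sits in the top 25% of the scale
    return grade - scale[0] >= 0.75 * (scale[1] - scale[0])

assert top_quarter(16.0, (0, 20))       # threshold is 15 on a 0..20 scale
assert not top_quarter(14.5, (0, 20))
assert top_quarter(4.8, (1, 5))         # threshold is 4 on a 1..5 scale
```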
diff --git a/perguntations/test.py b/perguntations/test.py
index 7c963b8..62d5696 100644
--- a/perguntations/test.py
+++ b/perguntations/test.py
@@ -5,6 +5,7 @@ Test - instances of this class are individual tests
 # python standard library
 from datetime import datetime
 import logging
+from math import nan

 # Logger configuration
 logger = logging.getLogger(__name__)
@@ -17,11 +18,13 @@ class Test(dict):
     '''

     # ------------------------------------------------------------------------
-    # def __init__(self, d):
-    #     super().__init__(d)
+    def __init__(self, d):
+        super().__init__(d)
+        self['grade'] = nan
+        self['comment'] = ''

     # ------------------------------------------------------------------------
-    def register(self, student: dict) -> None:
+    def start(self, student: dict) -> None:
         '''
         Write student id in the test and register start time
         '''
@@ -29,7 +32,6 @@ class Test(dict):
         self['start_time'] = datetime.now()
         self['finish_time'] = None
         self['state'] = 'ACTIVE'
-        self['comment'] = ''

     # ------------------------------------------------------------------------
     def reset_answers(self) -> None:
@@ -43,44 +45,56 @@ class Test(dict):
             self['questions'][ref].set_answer(ans)

     # ------------------------------------------------------------------------
-    def update_answers(self, answers_dict) -> None:
+    def submit(self, answers_dict) -> None:
         '''
         Given a dictionary ans={'ref': 'some answer'} updates the answers of
         multiple questions in the test. Only affects the questions referred
         in the dictionary.
         '''
+        self['finish_time'] = datetime.now()
         for ref, ans in answers_dict.items():
             self['questions'][ref].set_answer(ans)
+        self['state'] = 'SUBMITTED'

     # ------------------------------------------------------------------------
-    async def correct(self) -> float:
+    async def correct_async(self) -> None:
         '''Corrects all the answers of the test and computes the final grade'''
-        self['finish_time'] = datetime.now()
-        self['state'] = 'FINISHED'
-
         grade = 0.0
         for question in self['questions']:
             await question.correct_async()
             grade += question['grade'] * question['points']
             logger.debug('Correcting %30s: %3g%%',
-                         question["ref"], question["grade"]*100)
+                         question['ref'], question['grade']*100)
+
+        # truncate to avoid negative final grade and adjust scale
+        self['grade'] = max(0.0, grade) + self['scale'][0]
+        self['state'] = 'CORRECTED'
+
+    # ------------------------------------------------------------------------
+    def correct(self) -> None:
+        '''Corrects all the answers of the test and computes the final grade'''
+        grade = 0.0
+        for question in self['questions']:
+            question.correct()
+            grade += question['grade'] * question['points']
+            logger.debug('Correcting %30s: %3g%%',
+                         question['ref'], question['grade']*100)

         # truncate to avoid negative final grade and adjust scale
         self['grade'] = max(0.0, grade) + self['scale'][0]
-        return self['grade']
+        self['state'] = 'CORRECTED'

     # ------------------------------------------------------------------------
-    def giveup(self) -> float:
+    def giveup(self) -> None:
         '''Test is marked as QUIT and is not corrected'''
         self['finish_time'] = datetime.now()
         self['state'] = 'QUIT'
         self['grade'] = 0.0
         logger.info('Student %s: gave up.', self["student"]["number"])
-        return self['grade']

     # ------------------------------------------------------------------------
     def __str__(self) -> str:
-        return ('Test:\n'
-                f'  student: {self.get("student", "--")}\n'
-                f'  start_time: {self.get("start_time", "--")}\n'
-                f'  questions: {", ".join(q["ref"] for q in self["questions"])}\n')
+        return '\n'.join([f'{k}: {v}' for k, v in self.items()])
+        # return ('Test:\n'
+        #         f'  student: {self.get("student", "--")}\n'
+        #         f'  start_time: {self.get("start_time", "--")}\n'
+        #         f'  questions: {", ".join(q["ref"] for q in self["questions"])}\n')
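Both `correct()` variants in test.py compute the grade the same way: each question contributes `grade * points`, where a question's `grade` appears to be a fraction (the debug log prints it as a percentage) and `points` its weight; the sum is clamped at zero and shifted by the lower end of the scale. A worked example under those assumptions:

```python
def final_grade(questions, scale):
    '''questions: [(fraction_correct, points), ...] -- mirrors Test.correct()'''
    raw = sum(g * p for g, p in questions)
    return max(0.0, raw) + scale[0]       # truncate, then adjust to the scale

# three questions worth 10 + 6 + 4 points on a 0..20 scale:
assert final_grade([(1.0, 10), (0.5, 6), (0.0, 4)], (0, 20)) == 13.0
# a negative sum (penalised wrong answers) is clamped to the scale minimum:
assert final_grade([(-1.0, 10)], (0, 20)) == 0.0
```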
diff --git a/perguntations/testfactory.py b/perguntations/testfactory.py
index e6f085c..20edf1d 100644
--- a/perguntations/testfactory.py
+++ b/perguntations/testfactory.py
@@ -46,6 +46,7 @@ class TestFactory(dict):
             'scale': None,
             'duration': 0,          # 0=infinite
             'autosubmit': False,
+            'autocorrect': True,
             'debug': False,
             'show_ref': False,
         })
@@ -300,7 +301,7 @@ class TestFactory(dict):
         # copy these from the test configuration to each test instance
         inherit = {'ref', 'title',
                    'database', 'answers_dir', 'questions_dir', 'files',
-                   'duration', 'autosubmit',
+                   'duration', 'autosubmit', 'autocorrect',
                    'scale', 'show_points', 'show_ref', 'debug',
                    }  # NOT INCLUDED: testfile, allow_all, review
-- 
libgit2 0.21.2