diff --git a/app/experiment/views.py b/app/experiment/views.py
index 137d5c4f544310881b3dcb201b7c48e205e509c7..d36ced6e557bd57e5740d377c89e8b3e93056ef0 100644
--- a/app/experiment/views.py
+++ b/app/experiment/views.py
@@ -965,19 +965,22 @@ def statistics():
     # those are in answer table as page_idpage and question_idquestion respectively
     slider_answers = {}
     for participant in participants:
-        if participant.answer_counter > 0:
-            answers = answer.query.filter_by(
-                answer_set_idanswer_set=participant.idanswer_set)\
-                .order_by(answer.page_idpage)\
-                .all()
-            # flatten pages and questions to list of tuples (page_id, question_id)
-            _questions = [
-                item for sublist in pages_and_questions.values() for item in sublist]
+        if int(participant.answer_counter) == 0:
+            continue
+
+        answers = answer.query.filter_by(
+            answer_set_idanswer_set=participant.idanswer_set)\
+            .order_by(answer.page_idpage)\
+            .all()
+
+        # flatten pages and questions to list of tuples (page_id, question_id)
+        _questions = [
+            item for sublist in pages_and_questions.values() for item in sublist]

-            slider_answers[participant.session] = map_answers_to_questions(
-                answers, _questions)
+        slider_answers[participant.session] = map_answers_to_questions(
+            answers, _questions)

     mean = get_mean_from_slider_answers(slider_answers)
     # slider_answers['mean'] = get_mean_from_slider_answers(slider_answers)
diff --git a/app/routes.py b/app/routes.py
index 13550adb00e16e92932debd4928639ccff2627d6..cf6e2b4deb93873fa0ed5f06aa25ae483d84d444 100644
--- a/app/routes.py
+++ b/app/routes.py
@@ -21,7 +21,7 @@ from app.models import background_question_option
 from app.models import answer_set, answer, forced_id
 from app.models import user, trial_randomization
 from app.forms import LoginForm, RegisterForm, StartWithIdForm
-from app.utils import saved_data_as_file, map_answers_to_questions
+from app.utils import saved_data_as_file, map_answers_to_questions, timeit

 # Stimuli upload folder setting
 APP_ROOT = os.path.dirname(os.path.abspath(__file__))
@@ -336,6 +336,7 @@ def view_research_notification():
     return render_template('view_research_notification.html', research_notification_filename=research_notification_filename)


 @app.route('/download_csv')
 @login_required
+@timeit
 def download_csv():
@@ -367,8 +368,8 @@ def download_csv():

     # create CSV-header
     header = 'participant id;'
-    header += ';'.join([str(count) + '. bg_question: ' + question.background_question.strip()
-                        for count, question in enumerate(bg_questions, 1)])
+    header += ';'.join([str(count) + '. bg_question: ' + q.background_question.strip()
+                        for count, q in enumerate(bg_questions, 1)])

     for idx in range(1, len(pages) + 1):
         if len(questions) > 0:
@@ -381,113 +382,129 @@ def download_csv():
                             question.picture.strip()
                             for count, question in enumerate(embody_questions, 1)])

     csv += header + '\r\n'
+
+    csv += generate_answers(participants, pages, questions, embody_questions)
+
+    filename = "experiment_{}_{}.csv".format(
+        exp_id, date.today().strftime("%Y-%m-%d"))
+
+    return saved_data_as_file(filename, csv)
+
+
+@timeit
+def generate_answers(participants, pages, questions, embody_questions):
+
+    csv = ''
     answer_row = ''
     for participant in participants:
+
+
         # list only finished answer sets
-        if participant.answer_counter > 0:
-            try:
-                # append user session id
-                answer_row += participant.session + ';'
+        if int(participant.answer_counter) == 0:
+            continue

-                # append background question answers
-                bg_answers = background_question_answer.query.filter_by(
-                    answer_set_idanswer_set=participant.idanswer_set).all()
-                bg_answers_list = [str(a.answer).strip() for a in bg_answers]
-                answer_row += ';'.join(bg_answers_list) + ';'
+        try:
+            # append user session id
+            answer_row += participant.session + ';'

-                # append slider answers
-                slider_answers = answer.query.filter_by(
-                    answer_set_idanswer_set=participant.idanswer_set) \
-                    .order_by(answer.page_idpage) \
-                    .all()
+            # append background question answers
+            bg_answers = background_question_answer.query.filter_by(
+                answer_set_idanswer_set=participant.idanswer_set).all()
+            bg_answers_list = [str(a.answer).strip() for a in bg_answers]
+            answer_row += ';'.join(bg_answers_list) + ';'
+            # append slider answers
+            slider_answers = answer.query.filter_by(
+                answer_set_idanswer_set=participant.idanswer_set) \
+                .order_by(answer.page_idpage, answer.question_idquestion) \
+                .all()
+

-                pages_and_questions = {}
-                for p in pages:
-                    questions_list = [(p.idpage, a.idquestion) for a in questions]
-                    pages_and_questions[p.idpage] = questions_list
+            pages_and_questions = {}

-                _questions = [
-                    item for sublist in pages_and_questions.values() for item in sublist]
+            for p in pages:
+                questions_list = [(p.idpage, a.idquestion) for a in questions]
+                pages_and_questions[p.idpage] = questions_list

-                answers_list = map_answers_to_questions(slider_answers, _questions)
+            _questions = [
+                item for sublist in pages_and_questions.values() for item in sublist]

-                # typecast elemnts to string
-                answers_list = [str(a).strip() for a in answers_list]
+            answers_list = map_answers_to_questions(slider_answers, _questions)

-                answer_row += ';'.join(answers_list) + \
-                    ';' if slider_answers else len(
-                    questions) * len(pages) * ';'
+            # typecast elements to string
+            answers_list = [str(a).strip() for a in answers_list]

-                # append embody answers (coordinates)
-                # save embody answers as bitmap images
-                embody_answers = embody_answer.query.filter_by(
-                    answer_set_idanswer_set=participant.idanswer_set) \
-                    .order_by(embody_answer.page_idpage) \
-                    .all()
+            answer_row += ';'.join(answers_list) + \
+                ';' if slider_answers else len(
+                questions) * len(pages) * ';'

-                pages_and_questions = {}
-                for p in pages:
-                    questions_list = [(p.idpage, a.idembody) for a in embody_questions]
-                    pages_and_questions[p.idpage] = questions_list
+            # append embody answers (coordinates)
+            # save embody answers as bitmap images
+            embody_answers = embody_answer.query.filter_by(
+                answer_set_idanswer_set=participant.idanswer_set) \
+                .order_by(embody_answer.page_idpage) \
+                .all()

-                _questions = [
-                    item for sublist in pages_and_questions.values() for item in sublist]
+            pages_and_questions = {}

-                _embody_answers = map_answers_to_questions(embody_answers, _questions)
+            for p in pages:
+                questions_list = [(p.idpage, a.idembody) for a in embody_questions]
+                pages_and_questions[p.idpage] = questions_list

-                answers_list = []
+            _questions = [
+                item for sublist in pages_and_questions.values() for item in sublist]

-                for answer_data in _embody_answers:
+            _embody_answers = map_answers_to_questions(embody_answers, _questions)

-                    if not answer_data:
-                        answers_list.append('')
-                        continue
+            answers_list = []

-                    try:
-                        coordinates = json.loads(answer_data.coordinates)
-                        em_height = coordinates.get('height', 600) + 2
-                        em_width = coordinates.get('width', 200) + 2
+            for answer_data in _embody_answers:

-                        coordinates_to_bitmap = [
-                            [0 for x in range(em_height)] for y in range(em_width)]
+                if not answer_data:
+                    answers_list.append('')
+                    continue

-                        coordinates = list(
-                            zip(coordinates.get('x'), coordinates.get('y')))
+                try:
+                    coordinates = json.loads(answer_data.coordinates)
+                    em_height = coordinates.get('height', 600) + 2
+                    em_width = coordinates.get('width', 200) + 2

-                        for point in coordinates:
+                    coordinates_to_bitmap = [
+                        [0 for x in range(em_height)] for y in range(em_width)]

-                            try:
-                                # for every brush stroke, increment the pixel
-                                # value for every brush stroke
-                                coordinates_to_bitmap[point[0]][point[1]] += 0.1
-                            except IndexError:
-                                continue
+                    coordinates = list(
+                        zip(coordinates.get('x'), coordinates.get('y')))

-                        answers_list.append(json.dumps(coordinates_to_bitmap))
+                    for point in coordinates:

-                    except ValueError as err:
-                        app.logger(err)
+                        try:
+                            # increment the pixel value once for every brush stroke
+                            coordinates_to_bitmap[point[0]][point[1]] += 0.1
+                        except IndexError:
+                            continue

-                answer_row += ';'.join(answers_list) if embody_answers else \
-                    len(embody_questions) * len(pages) * ';'
+                    answers_list.append(json.dumps(coordinates_to_bitmap))

-                # old way to save only visited points:
-                # answers_list = [json.dumps(
-                #     list(zip( json.loads(a.coordinates)['x'],
-                #     json.loads(a.coordinates)['y']))) for a in embody_answers]
+                except ValueError as err:
+                    app.logger.error(err)

-            except TypeError as err:
-                print(err)
+            answer_row += ';'.join(answers_list) if embody_answers else \
+                len(embody_questions) * len(pages) * ';'

-            csv += answer_row + '\r\n'
-            answer_row = ''
+            # old way to save only visited points:
+            # answers_list = [json.dumps(
+            #     list(zip( json.loads(a.coordinates)['x'],
+            #     json.loads(a.coordinates)['y']))) for a in embody_answers]

-    filename = "experiment_{}_{}.csv".format(
-        exp_id, date.today().strftime("%Y-%m-%d"))
+        except TypeError as err:
+            print(err)

-    return saved_data_as_file(filename, csv)
+        csv += answer_row + '\r\n'
+        answer_row = ''
+    return csv


 @app.route('/researcher_info')
diff --git a/app/utils.py b/app/utils.py
index 2087e85972194902b94572114b58e4446986eb69..cbd4290502800e585ba6fe5f5f8028c5e4455614 100644
--- a/app/utils.py
+++ b/app/utils.py
@@ -1,16 +1,31 @@
 import os
 import tempfile
+import time
+from functools import wraps
 from itertools import zip_longest

 from flask import send_file


+def timeit(method):
+    @wraps(method)
+    def timed(*args, **kw):
+        ts = time.time()
+        result = method(*args, **kw)
+        te = time.time()
+        if 'log_time' in kw:
+            name = kw.get('log_name', method.__name__.upper())
+            kw['log_time'][name] = int((te - ts) * 1000)
+        else:
+            print('{} {:2.2f} ms'.format(method.__name__, (te - ts) * 1000))
+        return result
+
+    return timed
+
+
 def map_values_to_int(values: dict):
     #values = [map(int, i) for i in list(values.values())]
     return zip_longest(*values.values(), fillvalue=None)


 def calculate_mean(values: list) -> float:
-    print(values)
     n_answers = sum(x is not None for x in values)
     sum_of_answers = float(sum(filter(None, values)))
     mean = sum_of_answers / n_answers
@@ -52,6 +67,12 @@
     return None


+def question_matches_answer(question, answer):
+    return (answer.page_idpage == question[0]
+            and answer.question_idquestion == question[1])
+
+
 def map_answers_to_questions(answers, questions):
     '''
     questions = [(4, 1), (4, 2), (5, 1), (5, 2), (6, 1), (6, 2)]
@@ -60,6 +81,55 @@
     -> partial_answer = [None, None, None, None, 100, 99]
     '''
+
+    nth_answer = 0
+    results = []
+
+    for question in questions:
+
+        # guard against running past the end of the answer list
+        current_answer = answers[nth_answer] if nth_answer < len(answers) else None
+
+        if current_answer is not None and question_matches_answer(question, current_answer):
+            results.append(int(current_answer.answer))
+            nth_answer += 1
+        else:
+            results.append(None)
+
+    return results
+
+    '''
     return list(map(
         lambda x: get_values_from_list_of_answers(x, answers),
         questions))
+    '''
+
+
+'''
+select sub.answer_set_idanswer_set, group_concat(concat(
+    COALESCE(sub.aa, ''),
+    COALESCE(sub.ab, ''),
+    COALESCE(sub.ba, ''),
+    COALESCE(sub.bb, ''),
+    COALESCE(sub.ca, ''),
+    COALESCE(sub.cb, '')
+))
+FROM (
+    select *,
+    case when page_idpage = 4 and question_idquestion = 1 then answer end as aa,
+    case when page_idpage = 4 and question_idquestion = 2 then answer end as ab,
+    case when page_idpage = 5 and question_idquestion = 1 then answer end as ba,
+    case when page_idpage = 5 and question_idquestion = 2 then answer end as bb,
+    case when page_idpage = 6 and question_idquestion = 1 then answer end as ca,
+    case when page_idpage = 6 and question_idquestion = 2 then answer end as cb
+    from answer where answer_set_idanswer_set in ( select idanswer_set from answer_set where experiment_idexperiment = 2 and answer_counter != 0 )
+) as sub
+group by sub.answer_set_idanswer_set;
+
+
+# all possible page/question combos
+select distinct p.idpage, q.idquestion from question q join page p on p.experiment_idexperiment=q.experiment_idexperiment where p.experiment_idexperiment = 2 order by p.idpage,q.idquestion;
+'''
\ No newline at end of file
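
Illustrative usage sketch (not part of the patch): how the new timeit decorator and map_answers_to_questions helper in app/utils.py are expected to behave. The Answer namedtuple below is a hypothetical stand-in for the SQLAlchemy answer model and only mimics the three attributes the helper reads; running it assumes the app package (and Flask, which app.utils imports) is importable.

from collections import namedtuple

from app.utils import map_answers_to_questions, timeit

# Hypothetical stand-in for the answer model: only the attributes
# read by map_answers_to_questions / question_matches_answer.
Answer = namedtuple('Answer', ['page_idpage', 'question_idquestion', 'answer'])


@timeit
def demo():
    # Three pages with two slider questions each, but the participant
    # only answered the two questions on page 6.
    questions = [(4, 1), (4, 2), (5, 1), (5, 2), (6, 1), (6, 2)]
    answers = [Answer(6, 1, '100'), Answer(6, 2, '99')]
    return map_answers_to_questions(answers, questions)


if __name__ == '__main__':
    # timeit prints a line like "demo 0.02 ms"; the mapped result is
    # [None, None, None, None, 100, 99], as in the docstring example.
    print(demo())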
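
A minimal, self-contained sketch of the coordinate-to-bitmap conversion that generate_answers() applies to each embody answer, under the same assumptions the route code makes (a JSON payload with x/y point lists plus optional width/height): every brush point increments one cell of a (width + 2) x (height + 2) grid by 0.1, and points outside the canvas are ignored. The coordinates_to_bitmap function name here is illustrative, not part of the codebase.

import json


def coordinates_to_bitmap(raw_coordinates):
    data = json.loads(raw_coordinates)
    width = data.get('width', 200) + 2
    height = data.get('height', 600) + 2
    # one row per x column, one cell per y coordinate, as in generate_answers()
    bitmap = [[0 for _ in range(height)] for _ in range(width)]
    for x, y in zip(data.get('x', []), data.get('y', [])):
        try:
            bitmap[x][y] += 0.1
        except IndexError:
            continue  # brush point outside the canvas
    return bitmap


if __name__ == '__main__':
    raw = '{"x": [1, 1, 2], "y": [3, 3, 4], "width": 5, "height": 5}'
    bitmap = coordinates_to_bitmap(raw)
    print(bitmap[1][3], bitmap[2][4])  # 0.2 (two strokes) and 0.1 (one stroke)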