diff --git a/app/experiment/views.py b/app/experiment/views.py
index 711f3fa397dcf1f1137f68798f781a6f39ebe2a2..3bf8aeb43a4882db9df4da9611a502afe9dab6dd 100644
--- a/app/experiment/views.py
+++ b/app/experiment/views.py
@@ -1062,24 +1062,19 @@ def start_download_csv():
 def download_csv(meta):
     exp_id = meta["exp_id"]
 
-    data = generate_csv(exp_id)
-
-    # error handling
-    if isinstance(data, Exception):
-        emit('timeout', {'exc': str(data)})
-        return
-
     # create temporary file
     fd, path = mkstemp()
-    with os.fdopen(fd, 'w') as tmp:
-        tmp.write(data)
-        tmp.flush()
-
-    # return path and filename to front so user can start downloading
-    filename = "experiment_{}_{}".format(
-        exp_id, date.today().strftime("%Y-%m-%d"))
-    path = path.split('/')[-1]
-    emit('file_ready', {'path': path, 'filename': filename})
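+    # line-buffered so every completed row is pushed to the temp file as it is written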
+    with os.fdopen(fd, 'w', buffering=1) as tmp:
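+        # generate_csv() streams the rows into tmp and returns False if it fails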
+        if generate_csv(exp_id, tmp):
+            # return the path and filename to the front end so the user can start downloading
+            filename = "experiment_{}_{}".format(
+                exp_id, date.today().strftime("%Y-%m-%d"))
+            path = os.path.basename(path)
+            emit('file_ready', {'path': path, 'filename': filename})
+        else:
+            emit('timeout', {'exc': 'CSV generation failed'})
 
 
 @socketio.on('end', namespace="/download_csv")
diff --git a/app/utils.py b/app/utils.py
index a64c872534fec86fd76fe7e9eb037990d3820aa3..9fc8802751717657f9761625779ca9564de4cc8c 100644
--- a/app/utils.py
+++ b/app/utils.py
@@ -109,7 +109,7 @@ def map_answers_to_questions(answers, questions):
 
 
 @timeit
-def generate_csv(exp_id):
+def generate_csv(exp_id, file_handle):
 
     # answer sets with participant ids
     participants = answer_set.query.filter_by(
@@ -129,8 +129,6 @@ def generate_csv(exp_id):
     embody_questions = embody_question.query.filter_by(
         experiment_idexperiment=exp_id).all()
 
-    csv = ''
-
     # create CSV-header
     header = 'participant id;'
     header += ';'.join([str(count) + '. bg_question: ' + q.background_question.strip()
@@ -146,7 +144,7 @@ def generate_csv(exp_id):
             header += ';' + ';'.join(['page' + str(idx) + '_' + str(count) + '. embody_question: ' +
                                       question.picture.strip() for count, question in enumerate(embody_questions, 1)])
 
-    csv += header + '\r\n'
+    file_handle.write(header + '\r\n')
 
     # filter empty answer_sets
     participants = list(filter(lambda participant: True if int(
@@ -167,16 +165,17 @@ def generate_csv(exp_id):
             try:
                 emit('progress', {'done': nth, 'from': len_participants})
                 data = future.result()
-                csv += data + '\r\n'
+                file_handle.write(data + '\r\n')
+                # flush the stream's internal buffer so the row reaches the OS immediately
+                file_handle.flush()
             except Exception as exc:
                 print('generated an exception: {}'.format(exc))
-                return exc
-
-    return csv
+                return False
+
+    return True
 
 
 def generate_answer_row(participant, pages, questions, embody_questions):
-    # TODO: refactor
 
     with app.app_context():
 
@@ -264,7 +263,8 @@ def generate_answer_row(participant, pages, questions, embody_questions):
                 answers_list.append(json.dumps(coordinates_to_bitmap))
 
             except ValueError as err:
-                app.logger(err)
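+                # log the bad value and keep building the rest of the row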
+                app.logger.error(err)
 
         answer_row += ';'.join(answers_list) if embody_answers else \
             len(embody_questions) * len(pages) * ';'