diff --git a/client/sbs_client.py b/client/sbs_client.py
index 9ba2bd4c11afbc934874343db7a26bbe87909004..6b477ab9032bd0c48e423b11d7a5b2ed762290f1 100755
--- a/client/sbs_client.py
+++ b/client/sbs_client.py
@@ -63,7 +63,7 @@ def takePicture(cameraIndex, model):
             raise Exception('Unable to capture image')
 
         # matching serverside parameters, for comparable results
-        faces = model.detectMultiScale(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), 1.3, 5)
+        faces = detectFaces(model, cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))
 
         if len(faces) != 0:
             cv2.imwrite(TMP_IMAGE, frame)
@@ -81,6 +81,14 @@ def takePicture(cameraIndex, model):
 def logError(msg):
     logging.error('{0}:{1}'.format(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), msg))
 
+def detectFaces(model, image):
+    return model.detectMultiScale(image,
+        scaleFactor=1.1,
+        minNeighbors=5,
+        minSize=(30, 30),
+        flags=cv2.CASCADE_SCALE_IMAGE
+    )
+
 def main(argv):
     if argv != None and (len(argv) <= 2 or not re.match("^[\w\d_-]+$", argv[1]) or not re.match("^[\d]+$", argv[2])):
         return False
diff --git a/contrib/src/web/emotion_gender_processor.py b/contrib/src/web/emotion_gender_processor.py
index e4c880fa882d36c2ba2c4d146ab107968a13986e..20f291ab307382b1ea1b04789d2cf4af973403b3 100644
--- a/contrib/src/web/emotion_gender_processor.py
+++ b/contrib/src/web/emotion_gender_processor.py
@@ -46,6 +46,14 @@ class EGProcessor:
     def isValid(self):
         return self.valid
 
+    def detectFaces(self, model, image):
+        return model.detectMultiScale(image,
+            scaleFactor=1.1,
+            minNeighbors=5,
+            minSize=(30, 30),
+            flags=cv2.CASCADE_SCALE_IMAGE
+        )
+
     def processImage(self, image, result_fname = None, type = 'png', detect_emotion = True):
         result = []
 
@@ -68,7 +76,7 @@ class EGProcessor:
             gray_image = cv2.cvtColor(unchanged_image, cv2.COLOR_BGR2GRAY)
             gray_face = None
 
-            faces = detect_faces(self.face_detection, gray_image)
+            faces = self.detectFaces(self.face_detection, gray_image)
             for face_coordinates in faces:
                 x1, x2, y1, y2 = apply_offsets(face_coordinates, self.gender_offsets)
                 rgb_face = rgb_image[y1:y2, x1:x2]