diff --git a/apps/README.md b/apps/README.md
index 0142c31e..57e3b3b6 100644
--- a/apps/README.md
+++ b/apps/README.md
@@ -19,6 +19,7 @@ For a step by step tutorial on how to build an image classification network look
|[simple_classifier_cpp](simple_classifier_cpp/README.md) | C++<br>Multiple Networks<br>Application reads a single image from the filesystem and does an image classification inference on that image. Takes the image, the network and a labels file on the commandline|![](simple_classifier_cpp/screen_shot.jpg)|
|[simple_classifier_py](simple_classifier_py/README.md) | Python<br>Multiple Networks<br>Application reads a single image from the filesystem and does an image classification inference on that image. Takes the image, the network and a labels file on the commandline|![](simple_classifier_py/screen_shot.jpg)|
|[simple_classifier_py_camera](simple_classifier_py_camera/README.md) | Python<br>Multiple Networks<br>Application reads a video stream from a camera and does image classification inference on the stream continually updating the top result.|![](simple_classifier_py_camera/screen_shot.jpg)|
+|[face_emotion_game](face_emotion_game/README.md) | Python<br>Multiple Networks<br>Application reads a video stream from a camera, detects faces, and recognizes emotions. The app is presented as a game: the user scores points by matching their facial expressions to a prompted emoji.|![](face_emotion_game/screenshot.jpg)|
## Object Detection Applications
diff --git a/apps/face_emotion_game/Makefile b/apps/face_emotion_game/Makefile
new file mode 100644
index 00000000..0fee5301
--- /dev/null
+++ b/apps/face_emotion_game/Makefile
@@ -0,0 +1,119 @@
+GREEN = '\033[1;32m'
+YELLOW = '\033[1;33m'
+NOCOLOR = '\033[0m'
+
+# filenames for the graph files that we'll copy to this directory.
+FACE_RELATIVE_DIR = ../../networks/face_detection_retail_0004
+FACE_FILE_NAME_BASE = face-detection-retail-0004
+
+EMOTION_RELATIVE_DIR = ../../networks/emotions_recognition_retail_0003
+EMOTION_FILE_NAME_BASE = emotions-recognition-retail-0003
+
+
+# name of the directory and the base name of the main python file (minus the .py extension)
+APP_NAME = face_emotion_game
+
+.PHONY: all
+all: deps data
+
+
+.PHONY: data
+data:
+ @echo $(YELLOW)'\n'${APP_NAME}": No data required."$(NOCOLOR)
+
+
+.PHONY: deps
+deps: get_ir
+ @echo $(YELLOW)'\n'${APP_NAME}": Making dependencies..."$(NOCOLOR)
+
+
+
+.PHONY: default_model
+default_model: get_ir
+ @echo $(YELLOW)'\n'${APP_NAME}": Making default models..."$(NOCOLOR)
+
+
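+# get_ir checks for previously copied IR files and, if any are missing, builds
+# them via each network directory's own Makefile and copies them under
+# src/data/, which is where face_emotion_game.py expects to find them.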
+.PHONY: get_ir
+get_ir:
+ @echo $(YELLOW)'\n'${APP_NAME}": Downloading IRs..."$(NOCOLOR);
+	@if [ -e src/data/emotions-recognition/${EMOTION_FILE_NAME_BASE}.xml ] && [ -e src/data/emotions-recognition/${EMOTION_FILE_NAME_BASE}.bin ] ;\
+	then \
+		echo " - Emotion Recognition IRs already exist in the project folder.";\
+	else \
+		echo " - Emotion Recognition IR files do not exist in project directory.";\
+		echo " - Making Emotion Recognition IRs...";\
+		(cd ${EMOTION_RELATIVE_DIR}; make get_ir;);\
+		echo " - Copying Emotion Recognition IR files to current folder..." ;\
+		mkdir -p src/data/emotions-recognition ;\
+		cp ${EMOTION_RELATIVE_DIR}/${EMOTION_FILE_NAME_BASE}.xml src/data/emotions-recognition/ ;\
+		cp ${EMOTION_RELATIVE_DIR}/${EMOTION_FILE_NAME_BASE}.bin src/data/emotions-recognition/ ;\
+	fi
+	@if [ -e src/data/face-detection/${FACE_FILE_NAME_BASE}.xml ] && [ -e src/data/face-detection/${FACE_FILE_NAME_BASE}.bin ] ;\
+	then \
+		echo " - Face Detection IRs already exist in the project folder.";\
+	else \
+		echo " - Face Detection IR files do not exist in project directory.";\
+		echo " - Making Face Detection IRs...";\
+		(cd ${FACE_RELATIVE_DIR}; make get_ir;);\
+		echo " - Copying Face Detection IR files to current folder..." ;\
+		mkdir -p src/data/face-detection ;\
+		cp ${FACE_RELATIVE_DIR}/${FACE_FILE_NAME_BASE}.xml src/data/face-detection/ ;\
+		cp ${FACE_RELATIVE_DIR}/${FACE_FILE_NAME_BASE}.bin src/data/face-detection/ ;\
+	fi
+
+
+.PHONY: run
+run: run_py
+
+
+.PHONY: run_py
+run_py: deps data
+	@echo $(YELLOW)'\n'${APP_NAME}": Running Python sample..."$(NOCOLOR)
+ @echo "Checking OpenVINO environment..."
+ @if [ -z "$(INTEL_OPENVINO_DIR)" ] ; \
+ then \
+ echo "Please initiate the Intel OpenVINO environment by going to the installation directory for openvino and running the setupvars.sh file in the bin folder." ; \
+ exit 1 ; \
+ else \
+ echo "Intel OpenVINO environment is already set!" ; \
+ fi
+ python3 ${APP_NAME}.py -m -fps;
+
+
+.PHONY: install-reqs
+install-reqs:
+ @echo $(YELLOW)"\n"$(APP_NAME)": Checking installation requirements..."$(NOCOLOR)
+ @echo "No requirements needed."
+
+
+.PHONY: uninstall-reqs
+uninstall-reqs:
+ @echo $(YELLOW)'\n'${APP_NAME}": Uninstalling requirements..."$(NOCOLOR)
+ @echo "Nothing to uninstall."
+
+
+.PHONY: help
+help:
+ @echo "\nPossible make targets: ";
+ @echo $(YELLOW)" make run or run_py"$(NOCOLOR)"- runs the application";
+ @echo $(YELLOW)" make help "$(NOCOLOR)"- shows this message";
+ @echo $(YELLOW)" make all "$(NOCOLOR)"- makes everything needed to run but doesn't run";
+ @echo $(YELLOW)" make data "$(NOCOLOR)"- downloads data as needed";
+ @echo $(YELLOW)" make deps "$(NOCOLOR)"- makes/prepares dependencies";
+ @echo $(YELLOW)" make install-reqs "$(NOCOLOR)"- Installs requirements needed to run this sample on your system.";
+ @echo $(YELLOW)" make uninstall-reqs "$(NOCOLOR)"- Uninstalls requirements that were installed by the sample program.";
+ @echo $(YELLOW)" make default_model "$(NOCOLOR)"- compiles a default model to use when running";
+	@echo $(YELLOW)"  make get_ir "$(NOCOLOR)"- gets the emotion/face IR files from the model zoo";
+ @echo $(YELLOW)" make clean "$(NOCOLOR)"- removes all created content"
+ @echo ""
+
+
+.PHONY: clean
+clean:
+ @echo $(YELLOW)'\n'${APP_NAME}": Cleaning up files..."$(NOCOLOR);
+ rm -f src/data/emotions-recognition/${EMOTION_FILE_NAME_BASE}.xml;
+ rm -f src/data/emotions-recognition/${EMOTION_FILE_NAME_BASE}.bin;
+ rm -f src/data/face-detection/${FACE_FILE_NAME_BASE}.xml;
+ rm -f src/data/face-detection/${FACE_FILE_NAME_BASE}.bin;
+
diff --git a/apps/face_emotion_game/README.md b/apps/face_emotion_game/README.md
new file mode 100644
index 00000000..476083c3
--- /dev/null
+++ b/apps/face_emotion_game/README.md
@@ -0,0 +1,65 @@
+# Face Emotion Game
+## Introduction
+This app does face detection and emotion recognition using the Intel Movidius Neural Compute Stick 2.
+
+The example does face detection on a camera frame using face-detection-retail-0004, crops the detected faces, and then does emotion recognition on each face with the emotions-recognition-retail-0003 network. While running, the app shows the realtime camera preview, overlaying a box around each detected face and a label with the recognized facial expression. The user should match their expression to the emoji that appears at the top of the camera window. Both models can be found in the [Open Model Zoo](https://github.com/opencv/open_model_zoo). This sample uses pre-compiled IRs, so the Model Optimizer is not utilized.
+
+![](src/images/face_emotion_game.png)
+
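+The core of the app is a two-stage pipeline: detect faces on a downscaled copy of the frame, then classify each face crop. Below is a minimal sketch of that flow using the OpenVINO 2019 R2 Python API; the helper function `detect_and_classify` is illustrative and does not appear in the sample, though the variable names follow the sample code.
+
+```python
+import cv2
+import numpy as np
+
+def detect_and_classify(img, face_exec_net, face_in, face_out,
+                        emotion_exec_net, emo_in, emo_out, labels):
+    # Stage 1: face-detection-retail-0004 takes a 1x3x300x300 BGR tensor
+    blob = cv2.resize(img, (300, 300)).transpose((2, 0, 1))[np.newaxis, ...]
+    detections = face_exec_net.infer({face_in: blob})[face_out]
+
+    results = []
+    for det in detections[0][0]:
+        if float(det[2]) < 0.7:  # skip low-confidence detections
+            continue
+        xmin = max(int(det[3] * img.shape[1]), 0)
+        ymin = max(int(det[4] * img.shape[0]), 0)
+        xmax = int(det[5] * img.shape[1])
+        ymax = int(det[6] * img.shape[0])
+        # Stage 2: emotions-recognition-retail-0003 takes a 1x3x64x64 face crop
+        face = cv2.resize(img[ymin:ymax, xmin:xmax], (64, 64))
+        face = face.transpose((2, 0, 1)).reshape(1, 3, 64, 64)
+        probs = emotion_exec_net.infer({emo_in: face})[emo_out].reshape(-1)
+        results.append(((xmin, ymin, xmax, ymax), labels[int(np.argmax(probs))]))
+    return results
+```
+
+The game loop in `face_emotion_game.py` follows this same pattern, layering the emoji prompt, countdown, and scoring on top of the two inference calls.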
+
+## Building the Example
+
+To build the example code, do the following:
+1. Open a terminal and change directory to the sample base directory
+2. Type the following command in the terminal: ```make all```
+
+## Running the Example
+
+After building the example you can run the example code by doing the following:
+1. Open a terminal and change directory to the sample base directory
+2. Type the following command in the terminal: ```make run```
+
+When the application runs normally, another window should pop up showing the feed from the webcam/USB cam. The program performs inference on the faces it finds in each frame.
+
+## Prerequisites
+This program requires:
+- 1 x NCS2 device
+- 1 x Raspberry Pi 3
+- 1 x Webcam (USB)
+- OpenVINO 2019 R2 Toolkit
+
+*It may run with older versions, but you may see glitches such as the GUI window not closing when you click the X in the title bar, and other key-binding issues.
+
+Note: All development and testing have been done on a Raspberry Pi 3.
+
+## Makefile
+The provided Makefile has several targets that help with the above-mentioned tasks.
+
+### make run or make run_py
+Runs the sample application.
+
+### make help
+Shows available targets.
+
+### make all
+Builds and/or gathers all the required files needed to run the application.
+
+### make data
+Gathers all of the data needed to run the sample.
+
+### make deps
+Builds all of the dependencies needed to run the sample.
+
+### make default_model
+Compiles an IR file from a default model to be used when running the sample.
+
+### make install-reqs
+Checks for required packages that aren't installed as part of the OpenVINO installation.
+
+### make uninstall-reqs
+Uninstalls requirements that were installed by the sample program.
+
+### make clean
+Removes all the temporary files that are created by the Makefile.
+
+
diff --git a/apps/face_emotion_game/face_emotion_game.py b/apps/face_emotion_game/face_emotion_game.py
new file mode 100644
index 00000000..1f7e715e
--- /dev/null
+++ b/apps/face_emotion_game/face_emotion_game.py
@@ -0,0 +1,232 @@
+#! /usr/bin/env python3
+
+# Copyright(c) 2019 Intel Corporation.
+# License: MIT See LICENSE file in root directory.
+
+from argparse import ArgumentParser, SUPPRESS
+from openvino.inference_engine import IENetwork, IEPlugin
+import cv2
+import logging as log
+import numpy as np
+import random
+import os
+import sys
+import time
+import pickle
+
+# Game state and display constants
+score = 0
+FRAME_WIDTH = 640
+FRAME_HEIGHT = 480
+totalSecond = 7
+minSecond = 1.0
+startTime = 0.0
+timeElapsed = 0.0
+pick_emoji = 0
+
+RED_COLOR = (255, 0, 0)
+GREEN_COLOR = (50, 255, 50)
+DARK_GREEN_COLOR = (10, 150, 50)
+YELLOW_COLOR = (50, 255, 255)
+
+
+def build_argparser():
+ parser = ArgumentParser(add_help=False)
+ args = parser.add_argument_group('Options')
+ args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Show this help message and exit.')
+ args.add_argument("-m", "--mirror", action="store_true", help="Flip camera")
+ args.add_argument("-fps", "--show_fps", action="store_true", help="Show fps information on top of camera view")
+ return parser
+
+
+def show_countdown(frame, emotionLabel):
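+    """Overlay the prompted emoji and a countdown on the camera frame.
+
+    Returns the prompted emotion label when the countdown reaches zero;
+    otherwise returns None, picking a new random emoji after each cycle.
+    """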
+ global totalSecond
+ global minSecond
+ global startTime
+ global timeElapsed
+ global pick_emoji
+
+    mid_cam = FRAME_WIDTH // 2 - 55
+ emoji = cv2.imread(os.getcwd() + '/src/images/{}.png'.format(emotionLabel[pick_emoji]))
+ smile = cv2.addWeighted(frame[5:75, 5 + mid_cam:55 + mid_cam, :], 0.1, emoji, 1, 0)
+ frame[5:75, 5 + mid_cam:55 + mid_cam] = smile
+
+ if totalSecond >= minSecond:
+ if totalSecond <= 5:
+ cv2.putText(frame, str(totalSecond), (mid_cam + 60, 40), cv2.FONT_HERSHEY_DUPLEX, 1, GREEN_COLOR, 2, 1)
+
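+        # accumulate wall-clock time; once a full second has elapsed, tick the counter down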
+ timeElapsed += (time.time() - startTime)
+ if timeElapsed >= 1 and (timeElapsed - (time.time() - startTime) >= 1):
+ totalSecond -= 1
+ timeElapsed = 0
+ startTime = time.time()
+ if totalSecond == 0:
+ return emotionLabel[pick_emoji]
+ else:
+ totalSecond = 7
+ # select random emoji
+ pick_emoji = random.randint(0, 4)
+
+ return None
+
+
+def main():
+ log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
+ args = build_argparser().parse_args()
+
+ face_model_xml = os.getcwd() + "/src/data/face-detection/face-detection-retail-0004.xml"
+ face_model_bin = os.path.splitext(face_model_xml)[0] + ".bin"
+
+ emotions_model_xml = os.getcwd() + "/src/data/emotions-recognition/emotions-recognition-retail-0003.xml"
+ emotions_model_bin = os.path.splitext(emotions_model_xml)[0] + ".bin"
+
+ global score
+ device = "MYRIAD"
+ fps = ""
+ camera_id = 0
+ high_score = 0
+ show_user_helper = False
+
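+    # class labels in the output order of emotions-recognition-retail-0003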
+ emotionLabel = ['Neutral', 'Happy', 'Sad', 'Surprise', 'Anger']
+
+ cap = cv2.VideoCapture(camera_id)
+ log.info("Loading Camera id {}".format(camera_id))
+
+ # Read IR - face detection
+ face_net = IENetwork(model=face_model_xml, weights=face_model_bin)
+ log.info("Face-Detection network has been loaded:\n\t{}\n\t{}".format(face_model_xml, face_model_bin))
+
+ # Read IR - emotions recognition
+ emotion_net = IENetwork(model=emotions_model_xml, weights=emotions_model_bin)
+ log.info("Emotions-Recognition network has been loaded:\n\t{}\n\t{}".format(emotions_model_xml, emotions_model_bin))
+
+ log.info("Setting device: {}".format(device))
+ plugin = IEPlugin(device=device)
+
+ log.info("Loading Face-Detection model to the plugin")
+ face_exec_net = plugin.load(network=face_net)
+ # Set configurations for face detection
+ face_input_blob = next(iter(face_net.inputs))
+ face_out_blob = next(iter(face_net.outputs))
+
+ log.info("Loading Emotions-Recognition model to the plugin")
+ emotion_exec_net = plugin.load(network=emotion_net)
+ # Set configurations for emotion detection
+ emotion_input_blob = next(iter(emotion_net.inputs))
+ emotion_out_blob = next(iter(emotion_net.outputs))
+
+ if args.mirror:
+ log.info("Using camera mirror")
+
+    try:
+        with open('score.dat', 'rb') as file:
+            high_score = pickle.load(file)
+    except (OSError, EOFError, pickle.PickleError):
+        log.warning("Can't read high score!!!")
+
+ log.info("Game is starting...")
+ while cap.isOpened():
+ t1 = time.time()
+ ret_val, img = cap.read()
+
+ if not ret_val:
+ break
+
+ if args.mirror:
+ img = cv2.flip(img, 1)
+
+ if not show_user_helper:
+
+ cv2.putText(img, 'High score: {}'.format(high_score), (FRAME_WIDTH - 200, 30), cv2.FONT_HERSHEY_DUPLEX, 0.7, RED_COLOR, 1, 1)
+ cv2.putText(img, 'Your score is: {}'.format(score), (5, 30), cv2.FONT_HERSHEY_DUPLEX, 0.7, RED_COLOR, 1, 1)
+ picked_emoji = show_countdown(img, emotionLabel)
+
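+        # face-detection-retail-0004 expects a 1x3x300x300 BGR input: resize, add batch dim, HWC -> CHW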
+ prepimg = cv2.resize(img, (300, 300))
+ prepimg = prepimg[np.newaxis, :, :, :]
+ prepimg = prepimg.transpose((0, 3, 1, 2))
+ face_outputs = face_exec_net.infer(inputs={face_input_blob: prepimg})
+ res = face_exec_net.requests[0].outputs[face_out_blob]
+
+ for detection in res[0][0]:
+ confidence = float(detection[2])
+ xmin = int(detection[3] * img.shape[1])
+ ymin = int(detection[4] * img.shape[0])
+ xmax = int(detection[5] * img.shape[1])
+ ymax = int(detection[6] * img.shape[0])
+
+ if confidence > 0.7:
+ cv2.rectangle(img, (xmin, ymin), (xmax, ymax), color=GREEN_COLOR)
+
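+                # emotions-recognition-retail-0003 takes a 1x3x64x64 crop; only classify
+                # boxes that start at least 64 px below the top edge of the frame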
+ if ymin >= 64 and ymax >= 64:
+ emoimg = img[ymin:ymax, xmin:xmax]
+ emoimg = cv2.resize(emoimg, (64, 64))
+ emoimg = emoimg.transpose((2, 0, 1))
+ emoimg = emoimg.reshape(1, 3, 64, 64)
+ emotion_outputs = emotion_exec_net.infer(inputs={emotion_input_blob: emoimg})
+ res = emotion_exec_net.requests[0].outputs[emotion_out_blob]
+ out_emotion_reshape = res.reshape(-1, 5)
+ emotion_text = emotionLabel[np.argmax(out_emotion_reshape)]
+
+                    cv2.putText(img, emotion_text, (abs(xmin), abs(ymin - 10)), cv2.FONT_HERSHEY_DUPLEX, 0.7, YELLOW_COLOR, 1, 1)
+
+ if picked_emoji:
+                        log.info("picked: {} recognized: {}".format(picked_emoji, emotion_text))
+ if picked_emoji and picked_emoji == emotion_text:
+ score += 10
+ elif picked_emoji and picked_emoji != emotion_text:
+ # save high score
+                        with open('score.dat', 'wb') as file:
+                            high_score = score if score > high_score else high_score
+                            pickle.dump(high_score, file)
+                        log.info("Your score: {}".format(str(score)))
+ # reset personal score
+ score = 0
+
+
+ if args.show_fps:
+ elapsed_time = time.time() - t1
+ fps = "(Playback) {:.1f} FPS".format(1 / elapsed_time)
+ cv2.putText(img, fps, (15, FRAME_HEIGHT - 15), cv2.FONT_HERSHEY_SIMPLEX, 0.4, YELLOW_COLOR, 1, cv2.LINE_AA)
+
+ cv2.putText(img, "Hit 'h' for help", (FRAME_WIDTH - 150, FRAME_HEIGHT - 30), cv2.FONT_HERSHEY_SIMPLEX, 0.4, YELLOW_COLOR, 1, cv2.LINE_AA)
+ cv2.putText(img, "Hit 'ESC' or 'q' to Exit", (FRAME_WIDTH - 150, FRAME_HEIGHT - 15), cv2.FONT_HERSHEY_SIMPLEX, 0.4, YELLOW_COLOR, 1, cv2.LINE_AA)
+
+        if show_user_helper:
+            # overlay the game instructions from help.txt on the camera frame
+            with open('help.txt', 'r') as helpfile:
+                y0, dy = 50, 15
+                for i, line in enumerate(helpfile.read().split("\n")):
+                    y = y0 + i * dy
+                    cv2.putText(img, line, (30, int(y)), cv2.FONT_HERSHEY_SIMPLEX, 0.4, YELLOW_COLOR, 1, cv2.LINE_AA)
+            cv2.putText(img, "Hit 'ESC' or 'q' to quit user help", (FRAME_WIDTH - 220, FRAME_HEIGHT - 15), cv2.FONT_HERSHEY_SIMPLEX, 0.4, YELLOW_COLOR, 1, cv2.LINE_AA)
+
+        cv2.imshow('Face Emotion Game', img)
+
+        waitkey = cv2.waitKey(1)
+        if waitkey & 0xFF == ord('q') or waitkey == 27:
+            if show_user_helper:
+                # ESC or 'q' closes the help overlay and resumes the game
+                log.info('Exiting help window...')
+                show_user_helper = False
+            else:
+                # save high score, then exit
+                with open('score.dat', 'wb') as file:
+                    high_score = score if score > high_score else high_score
+                    pickle.dump(high_score, file)
+                log.info("High score saved: {}".format(str(high_score)))
+                break  # esc or 'q' to quit
+        elif waitkey & 0xFF == ord('h'):
+            log.info('Opening help window...')
+            show_user_helper = True
+
+ cv2.destroyAllWindows()
+
+
+if __name__ == '__main__':
+ sys.exit(main() or 0)
diff --git a/apps/face_emotion_game/help.txt b/apps/face_emotion_game/help.txt
new file mode 100644
index 00000000..0064361a
--- /dev/null
+++ b/apps/face_emotion_game/help.txt
@@ -0,0 +1,13 @@
+** Face Emotion Game **
+The purpose of the game is to reach the highest score using your facial expressions.
+
+Game instructions:
+-----------------
+1- Match your facial expression to the emoji.
+2- Hold your expression during the countdown.
+3- Each emotion match increases your score by 10 points.
+4- The countdown runs from 5 to 0; at 0 it resets and starts again from 5 seconds.
+5- There is a delay of about 2 seconds between each emoji switch.
+6- The high score is saved whenever it is broken.
+
+Have Fun :)
\ No newline at end of file
diff --git a/apps/face_emotion_game/score.dat b/apps/face_emotion_game/score.dat
new file mode 100644
index 00000000..57b3b9e9
--- /dev/null
+++ b/apps/face_emotion_game/score.dat
@@ -0,0 +1 @@
+€K´.
\ No newline at end of file
diff --git a/apps/face_emotion_game/screenshot.jpg b/apps/face_emotion_game/screenshot.jpg
new file mode 100644
index 00000000..47674e2d
Binary files /dev/null and b/apps/face_emotion_game/screenshot.jpg differ
diff --git a/apps/face_emotion_game/src/images/Anger.png b/apps/face_emotion_game/src/images/Anger.png
new file mode 100644
index 00000000..8664ba4f
Binary files /dev/null and b/apps/face_emotion_game/src/images/Anger.png differ
diff --git a/apps/face_emotion_game/src/images/Happy.png b/apps/face_emotion_game/src/images/Happy.png
new file mode 100644
index 00000000..f0acaca3
Binary files /dev/null and b/apps/face_emotion_game/src/images/Happy.png differ
diff --git a/apps/face_emotion_game/src/images/Neutral.png b/apps/face_emotion_game/src/images/Neutral.png
new file mode 100644
index 00000000..50953423
Binary files /dev/null and b/apps/face_emotion_game/src/images/Neutral.png differ
diff --git a/apps/face_emotion_game/src/images/Sad.png b/apps/face_emotion_game/src/images/Sad.png
new file mode 100644
index 00000000..7863809e
Binary files /dev/null and b/apps/face_emotion_game/src/images/Sad.png differ
diff --git a/apps/face_emotion_game/src/images/Surprise.png b/apps/face_emotion_game/src/images/Surprise.png
new file mode 100644
index 00000000..da225214
Binary files /dev/null and b/apps/face_emotion_game/src/images/Surprise.png differ
diff --git a/apps/face_emotion_game/src/images/face_emotion_game.png b/apps/face_emotion_game/src/images/face_emotion_game.png
new file mode 100644
index 00000000..626c2925
Binary files /dev/null and b/apps/face_emotion_game/src/images/face_emotion_game.png differ
diff --git a/networks/face_detection_retail_0004/image.jpg b/networks/face_detection_retail_0004/image.jpg
new file mode 100644
index 00000000..3a0d847c
Binary files /dev/null and b/networks/face_detection_retail_0004/image.jpg differ