In [1]:
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings("ignore")

data = pd.read_csv('./fer2013.csv')

width, height = 48, 48

datapoints = data['pixels'].tolist()

#getting features for training
X = []
for xseq in datapoints:
    xx = [int(xp) for xp in xseq.split(' ')]
    xx = np.asarray(xx).reshape(width, height)
    X.append(xx.astype('float32'))

X = np.asarray(X)
X = np.expand_dims(X, -1)

#getting labels for training
y = pd.get_dummies(data['emotion']).values  # one-hot labels; .as_matrix() was removed in newer pandas

#storing them using numpy
np.save('fdataX', X)
np.save('flabels', y)

print("Preprocessing Done")
print("Number of Features: "+str(len(X[0])))
print("Number of Labels: "+ str(len(y[0])))
print("Number of examples in dataset:"+str(len(X)))
print("X,y stored in fdataX.npy and flabels.npy respectively")
Preprocessing Done
Number of Features: 48
Number of Labels: 7
Number of examples in dataset:35887
X,y stored in fdataX.npy and flabels.npy respectively
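As a quick sanity check, the saved arrays can be reloaded and their shapes inspected. A minimal sketch, assuming fdataX.npy and flabels.npy were written to the current directory by the cell above:

import numpy as np

X = np.load('fdataX.npy')   # expected shape (35887, 48, 48, 1)
y = np.load('flabels.npy')  # expected shape (35887, 7)
print(X.shape, y.shape)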
In [4]:
import sys, os
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from keras.losses import categorical_crossentropy
from keras.optimizers import Adam
from keras.regularizers import l2
from keras.callbacks import ReduceLROnPlateau, TensorBoard, EarlyStopping, ModelCheckpoint
from keras.models import load_model
from keras.models import model_from_json


num_features = 64
num_labels = 7
batch_size = 64
epochs = 100
width, height = 48, 48

x = np.load('./fdataX.npy')
y = np.load('./flabels.npy')

x -= np.mean(x, axis=0)
x /= np.std(x, axis=0)

#for xx in range(10):
#    plt.figure(xx)
#    plt.imshow(x[xx].reshape((48, 48)), interpolation='none', cmap='gray')
#plt.show()

#splitting into training, validation and testing data
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.1, random_state=42)
X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=0.1, random_state=41)

#saving the test samples to be used later
np.save('modXtest', X_test)
np.save('modytest', y_test)

#designing the CNN
model = Sequential()

model.add(Conv2D(num_features, kernel_size=(3, 3), activation='relu', input_shape=(width, height, 1), data_format='channels_last', kernel_regularizer=l2(0.01)))
model.add(Conv2D(num_features, kernel_size=(3, 3), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(0.5))

model.add(Conv2D(2*num_features, kernel_size=(3, 3), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(2*num_features, kernel_size=(3, 3), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(0.5))

model.add(Conv2D(2*2*num_features, kernel_size=(3, 3), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(2*2*num_features, kernel_size=(3, 3), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(0.5))

model.add(Conv2D(2*2*2*num_features, kernel_size=(3, 3), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(2*2*2*num_features, kernel_size=(3, 3), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(0.5))

model.add(Flatten())

model.add(Dense(2*2*2*num_features, activation='relu'))
model.add(Dropout(0.4))
model.add(Dense(2*2*num_features, activation='relu'))
model.add(Dropout(0.4))
model.add(Dense(2*num_features, activation='relu'))
model.add(Dropout(0.5))

model.add(Dense(num_labels, activation='softmax'))

#model.summary()

#Compiling the model with the Adam optimizer and categorical crossentropy loss
model.compile(loss=categorical_crossentropy,
              optimizer=Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7),
              metrics=['accuracy'])

#training the model
model.fit(np.array(X_train), np.array(y_train),
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(np.array(X_valid), np.array(y_valid)),
          shuffle=True)

#saving the model to be used later
fer_json = model.to_json()
with open("fer.json", "w") as json_file:
    json_file.write(fer_json)
model.save_weights("fer.h5")
print("Saved model to disk")
Using TensorFlow backend.
Train on 29068 samples, validate on 3230 samples
Epoch 1/100
29068/29068 [==============================] - 1031s 35ms/step - loss: 1.9894 - accuracy: 0.2131 - val_loss: 1.8308 - val_accuracy: 0.2557
Epoch 2/100
29068/29068 [==============================] - 1022s 35ms/step - loss: 1.8419 - accuracy: 0.2448 - val_loss: 1.7959 - val_accuracy: 0.2594
Epoch 3/100
29068/29068 [==============================] - 1023s 35ms/step - loss: 1.8059 - accuracy: 0.2593 - val_loss: 1.7525 - val_accuracy: 0.2830
Epoch 4/100
29068/29068 [==============================] - 972s 33ms/step - loss: 1.7135 - accuracy: 0.3052 - val_loss: 1.6654 - val_accuracy: 0.3440
Epoch 5/100
29068/29068 [==============================] - 1057s 36ms/step - loss: 1.5969 - accuracy: 0.3650 - val_loss: 1.4396 - val_accuracy: 0.4207
Epoch 6/100
29068/29068 [==============================] - 1081s 37ms/step - loss: 1.4976 - accuracy: 0.4074 - val_loss: 1.4013 - val_accuracy: 0.4548
Epoch 7/100
29068/29068 [==============================] - 1020s 35ms/step - loss: 1.4399 - accuracy: 0.4326 - val_loss: 1.3470 - val_accuracy: 0.4656
Epoch 8/100
29068/29068 [==============================] - 1038s 36ms/step - loss: 1.4008 - accuracy: 0.4549 - val_loss: 1.2982 - val_accuracy: 0.4913
Epoch 9/100
29068/29068 [==============================] - 1046s 36ms/step - loss: 1.3651 - accuracy: 0.4727 - val_loss: 1.3060 - val_accuracy: 0.5034
Epoch 10/100
29068/29068 [==============================] - 1060s 36ms/step - loss: 1.3321 - accuracy: 0.4954 - val_loss: 1.2727 - val_accuracy: 0.5223
Epoch 11/100
29068/29068 [==============================] - 1066s 37ms/step - loss: 1.3005 - accuracy: 0.5031 - val_loss: 1.2232 - val_accuracy: 0.5337
Epoch 12/100
29068/29068 [==============================] - 982s 34ms/step - loss: 1.2747 - accuracy: 0.5208 - val_loss: 1.2060 - val_accuracy: 0.5381
Epoch 13/100
29068/29068 [==============================] - 936s 32ms/step - loss: 1.2459 - accuracy: 0.5351 - val_loss: 1.1569 - val_accuracy: 0.5666
Epoch 14/100
29068/29068 [==============================] - 961s 33ms/step - loss: 1.2210 - accuracy: 0.5455 - val_loss: 1.1460 - val_accuracy: 0.5793
Epoch 15/100
29068/29068 [==============================] - 978s 34ms/step - loss: 1.2034 - accuracy: 0.5544 - val_loss: 1.1461 - val_accuracy: 0.5752
Epoch 16/100
29068/29068 [==============================] - 979s 34ms/step - loss: 1.1717 - accuracy: 0.5657 - val_loss: 1.1294 - val_accuracy: 0.5759
Epoch 17/100
29068/29068 [==============================] - 979s 34ms/step - loss: 1.1507 - accuracy: 0.5774 - val_loss: 1.1026 - val_accuracy: 0.5873
Epoch 18/100
29068/29068 [==============================] - 979s 34ms/step - loss: 1.1326 - accuracy: 0.5880 - val_loss: 1.1078 - val_accuracy: 0.5932
Epoch 19/100
29068/29068 [==============================] - 979s 34ms/step - loss: 1.1145 - accuracy: 0.5931 - val_loss: 1.0948 - val_accuracy: 0.5969
Epoch 20/100
29068/29068 [==============================] - 979s 34ms/step - loss: 1.0965 - accuracy: 0.6019 - val_loss: 1.0868 - val_accuracy: 0.5994
Epoch 21/100
29068/29068 [==============================] - 979s 34ms/step - loss: 1.0645 - accuracy: 0.6125 - val_loss: 1.0662 - val_accuracy: 0.6180
Epoch 22/100
29068/29068 [==============================] - 978s 34ms/step - loss: 1.0473 - accuracy: 0.6189 - val_loss: 1.0440 - val_accuracy: 0.6232
Epoch 23/100
29068/29068 [==============================] - 978s 34ms/step - loss: 1.0292 - accuracy: 0.6203 - val_loss: 1.0578 - val_accuracy: 0.6090
Epoch 24/100
29068/29068 [==============================] - 979s 34ms/step - loss: 1.0177 - accuracy: 0.6309 - val_loss: 1.0698 - val_accuracy: 0.6142
Epoch 25/100
29068/29068 [==============================] - 985s 34ms/step - loss: 0.9900 - accuracy: 0.6414 - val_loss: 1.0645 - val_accuracy: 0.6211
Epoch 26/100
29068/29068 [==============================] - 979s 34ms/step - loss: 0.9780 - accuracy: 0.6464 - val_loss: 1.0203 - val_accuracy: 0.6176
Epoch 27/100
29068/29068 [==============================] - 978s 34ms/step - loss: 0.9594 - accuracy: 0.6545 - val_loss: 1.0303 - val_accuracy: 0.6282
Epoch 28/100
29068/29068 [==============================] - 988s 34ms/step - loss: 0.9342 - accuracy: 0.6612 - val_loss: 1.0157 - val_accuracy: 0.6313
Epoch 29/100
29068/29068 [==============================] - 978s 34ms/step - loss: 0.9206 - accuracy: 0.6661 - val_loss: 1.0176 - val_accuracy: 0.6440
Epoch 30/100
29068/29068 [==============================] - 978s 34ms/step - loss: 0.9066 - accuracy: 0.6759 - val_loss: 1.0160 - val_accuracy: 0.6378
Epoch 31/100
29068/29068 [==============================] - 979s 34ms/step - loss: 0.8853 - accuracy: 0.6823 - val_loss: 1.0166 - val_accuracy: 0.6446
Epoch 32/100
29068/29068 [==============================] - 978s 34ms/step - loss: 0.8745 - accuracy: 0.6834 - val_loss: 1.0212 - val_accuracy: 0.6375
Epoch 33/100
29068/29068 [==============================] - 978s 34ms/step - loss: 0.8572 - accuracy: 0.6889 - val_loss: 1.0251 - val_accuracy: 0.6498
Epoch 34/100
29068/29068 [==============================] - 979s 34ms/step - loss: 0.8395 - accuracy: 0.6954 - val_loss: 1.0039 - val_accuracy: 0.6539
Epoch 35/100
29068/29068 [==============================] - 979s 34ms/step - loss: 0.8283 - accuracy: 0.7032 - val_loss: 1.0094 - val_accuracy: 0.6514
Epoch 36/100
29068/29068 [==============================] - 979s 34ms/step - loss: 0.8153 - accuracy: 0.7081 - val_loss: 1.0039 - val_accuracy: 0.6638
Epoch 37/100
29068/29068 [==============================] - 978s 34ms/step - loss: 0.8091 - accuracy: 0.7128 - val_loss: 1.0012 - val_accuracy: 0.6628
Epoch 38/100
29068/29068 [==============================] - 979s 34ms/step - loss: 0.7904 - accuracy: 0.7197 - val_loss: 1.0219 - val_accuracy: 0.6498
Epoch 39/100
29068/29068 [==============================] - 977s 34ms/step - loss: 0.7672 - accuracy: 0.7274 - val_loss: 1.0293 - val_accuracy: 0.6474
Epoch 40/100
29068/29068 [==============================] - 977s 34ms/step - loss: 0.7528 - accuracy: 0.7344 - val_loss: 1.0455 - val_accuracy: 0.6582
Epoch 41/100
29068/29068 [==============================] - 978s 34ms/step - loss: 0.7424 - accuracy: 0.7371 - val_loss: 1.0794 - val_accuracy: 0.6514
Epoch 42/100
29068/29068 [==============================] - 978s 34ms/step - loss: 0.7233 - accuracy: 0.7445 - val_loss: 1.0578 - val_accuracy: 0.6548
Epoch 43/100
29068/29068 [==============================] - 977s 34ms/step - loss: 0.7069 - accuracy: 0.7515 - val_loss: 1.0978 - val_accuracy: 0.6526
Epoch 44/100
29068/29068 [==============================] - 978s 34ms/step - loss: 0.6978 - accuracy: 0.7558 - val_loss: 1.0607 - val_accuracy: 0.6598
Epoch 45/100
29068/29068 [==============================] - 977s 34ms/step - loss: 0.6899 - accuracy: 0.7579 - val_loss: 1.1016 - val_accuracy: 0.6604
Epoch 46/100
29068/29068 [==============================] - 978s 34ms/step - loss: 0.6794 - accuracy: 0.7598 - val_loss: 1.0566 - val_accuracy: 0.6529
Epoch 47/100
29068/29068 [==============================] - 977s 34ms/step - loss: 0.6817 - accuracy: 0.7610 - val_loss: 1.0364 - val_accuracy: 0.6669
Epoch 48/100
29068/29068 [==============================] - 977s 34ms/step - loss: 0.6547 - accuracy: 0.7718 - val_loss: 1.0459 - val_accuracy: 0.6628
Epoch 49/100
29068/29068 [==============================] - 977s 34ms/step - loss: 0.6384 - accuracy: 0.7777 - val_loss: 1.0989 - val_accuracy: 0.6601
Epoch 50/100
29068/29068 [==============================] - 979s 34ms/step - loss: 0.6342 - accuracy: 0.7820 - val_loss: 1.0467 - val_accuracy: 0.6663
Epoch 51/100
29068/29068 [==============================] - 978s 34ms/step - loss: 0.6132 - accuracy: 0.7900 - val_loss: 1.0821 - val_accuracy: 0.6613
Epoch 52/100
29068/29068 [==============================] - 977s 34ms/step - loss: 0.6140 - accuracy: 0.7851 - val_loss: 1.0682 - val_accuracy: 0.6641
Epoch 53/100
29068/29068 [==============================] - 966s 33ms/step - loss: 0.5991 - accuracy: 0.7962 - val_loss: 1.0985 - val_accuracy: 0.6644
Epoch 54/100
29068/29068 [==============================] - 950s 33ms/step - loss: 0.5918 - accuracy: 0.7933 - val_loss: 1.0999 - val_accuracy: 0.6653
Epoch 55/100
29068/29068 [==============================] - 975s 34ms/step - loss: 0.5798 - accuracy: 0.8016 - val_loss: 1.1336 - val_accuracy: 0.6613
Epoch 56/100
29068/29068 [==============================] - 963s 33ms/step - loss: 0.5706 - accuracy: 0.8070 - val_loss: 1.1013 - val_accuracy: 0.6625
Epoch 57/100
29068/29068 [==============================] - 946s 33ms/step - loss: 0.5618 - accuracy: 0.8082 - val_loss: 1.1059 - val_accuracy: 0.6700
Epoch 58/100
29068/29068 [==============================] - 972s 33ms/step - loss: 0.5591 - accuracy: 0.8086 - val_loss: 1.0761 - val_accuracy: 0.6619
Epoch 59/100
29068/29068 [==============================] - 972s 33ms/step - loss: 0.5513 - accuracy: 0.8111 - val_loss: 1.1479 - val_accuracy: 0.6681
Epoch 60/100
29068/29068 [==============================] - 975s 34ms/step - loss: 0.5372 - accuracy: 0.8201 - val_loss: 1.1574 - val_accuracy: 0.6533
Epoch 61/100
29068/29068 [==============================] - 972s 33ms/step - loss: 0.5267 - accuracy: 0.8213 - val_loss: 1.1802 - val_accuracy: 0.6684
Epoch 62/100
29068/29068 [==============================] - 971s 33ms/step - loss: 0.5078 - accuracy: 0.8286 - val_loss: 1.1876 - val_accuracy: 0.6548
Epoch 63/100
29068/29068 [==============================] - 971s 33ms/step - loss: 0.5088 - accuracy: 0.8297 - val_loss: 1.1393 - val_accuracy: 0.6706
Epoch 64/100
29068/29068 [==============================] - 971s 33ms/step - loss: 0.4937 - accuracy: 0.8359 - val_loss: 1.1993 - val_accuracy: 0.6582
Epoch 65/100
29068/29068 [==============================] - 973s 33ms/step - loss: 0.4982 - accuracy: 0.8334 - val_loss: 1.1494 - val_accuracy: 0.6681
Epoch 66/100
29068/29068 [==============================] - 971s 33ms/step - loss: 0.4814 - accuracy: 0.8380 - val_loss: 1.2499 - val_accuracy: 0.6709
Epoch 67/100
29068/29068 [==============================] - 971s 33ms/step - loss: 0.4810 - accuracy: 0.8386 - val_loss: 1.2697 - val_accuracy: 0.6687
Epoch 68/100
29068/29068 [==============================] - 971s 33ms/step - loss: 0.4651 - accuracy: 0.8467 - val_loss: 1.1749 - val_accuracy: 0.6672
Epoch 69/100
29068/29068 [==============================] - 972s 33ms/step - loss: 0.4532 - accuracy: 0.8509 - val_loss: 1.2082 - val_accuracy: 0.6650
Epoch 70/100
29068/29068 [==============================] - 971s 33ms/step - loss: 0.4580 - accuracy: 0.8487 - val_loss: 1.2373 - val_accuracy: 0.6715
Epoch 71/100
29068/29068 [==============================] - 971s 33ms/step - loss: 0.4536 - accuracy: 0.8489 - val_loss: 1.2182 - val_accuracy: 0.6687
Epoch 72/100
29068/29068 [==============================] - 971s 33ms/step - loss: 0.4296 - accuracy: 0.8557 - val_loss: 1.2785 - val_accuracy: 0.6588
Epoch 73/100
29068/29068 [==============================] - 971s 33ms/step - loss: 0.4439 - accuracy: 0.8527 - val_loss: 1.2874 - val_accuracy: 0.6718
Epoch 74/100
29068/29068 [==============================] - 971s 33ms/step - loss: 0.4248 - accuracy: 0.8595 - val_loss: 1.2402 - val_accuracy: 0.6681
Epoch 75/100
29068/29068 [==============================] - 971s 33ms/step - loss: 0.4202 - accuracy: 0.8631 - val_loss: 1.2361 - val_accuracy: 0.6656
Epoch 76/100
29068/29068 [==============================] - 971s 33ms/step - loss: 0.4110 - accuracy: 0.8648 - val_loss: 1.2188 - val_accuracy: 0.6610
Epoch 77/100
29068/29068 [==============================] - 972s 33ms/step - loss: 0.4036 - accuracy: 0.8675 - val_loss: 1.2732 - val_accuracy: 0.6669
Epoch 78/100
29068/29068 [==============================] - 949s 33ms/step - loss: 0.4027 - accuracy: 0.8671 - val_loss: 1.1991 - val_accuracy: 0.6594
Epoch 79/100
29068/29068 [==============================] - 942s 32ms/step - loss: 0.3941 - accuracy: 0.8721 - val_loss: 1.3174 - val_accuracy: 0.6659
Epoch 80/100
29068/29068 [==============================] - 977s 34ms/step - loss: 0.3986 - accuracy: 0.8723 - val_loss: 1.2801 - val_accuracy: 0.6548
Epoch 81/100
29068/29068 [==============================] - 977s 34ms/step - loss: 0.3820 - accuracy: 0.8749 - val_loss: 1.2886 - val_accuracy: 0.6598
Epoch 82/100
29068/29068 [==============================] - 980s 34ms/step - loss: 0.3731 - accuracy: 0.8779 - val_loss: 1.3292 - val_accuracy: 0.6598
Epoch 83/100
29068/29068 [==============================] - 976s 34ms/step - loss: 0.3700 - accuracy: 0.8818 - val_loss: 1.3075 - val_accuracy: 0.6759
Epoch 84/100
29068/29068 [==============================] - 974s 34ms/step - loss: 0.3820 - accuracy: 0.8779 - val_loss: 1.2919 - val_accuracy: 0.6653
Epoch 85/100
29068/29068 [==============================] - 975s 34ms/step - loss: 0.3704 - accuracy: 0.8816 - val_loss: 1.3239 - val_accuracy: 0.6669
Epoch 86/100
29068/29068 [==============================] - 975s 34ms/step - loss: 0.3709 - accuracy: 0.8808 - val_loss: 1.2711 - val_accuracy: 0.6737
Epoch 87/100
29068/29068 [==============================] - 975s 34ms/step - loss: 0.3542 - accuracy: 0.8863 - val_loss: 1.3834 - val_accuracy: 0.6731
Epoch 88/100
29068/29068 [==============================] - 974s 34ms/step - loss: 0.3604 - accuracy: 0.8865 - val_loss: 1.3167 - val_accuracy: 0.6672
Epoch 89/100
29068/29068 [==============================] - 974s 34ms/step - loss: 0.3476 - accuracy: 0.8890 - val_loss: 1.3254 - val_accuracy: 0.6604
Epoch 90/100
29068/29068 [==============================] - 975s 34ms/step - loss: 0.3446 - accuracy: 0.8887 - val_loss: 1.2953 - val_accuracy: 0.6647
Epoch 91/100
29068/29068 [==============================] - 975s 34ms/step - loss: 0.3431 - accuracy: 0.8885 - val_loss: 1.3950 - val_accuracy: 0.6632
Epoch 92/100
29068/29068 [==============================] - 974s 34ms/step - loss: 0.3354 - accuracy: 0.8933 - val_loss: 1.2779 - val_accuracy: 0.6582
Epoch 93/100
29068/29068 [==============================] - 975s 34ms/step - loss: 0.3300 - accuracy: 0.8926 - val_loss: 1.3642 - val_accuracy: 0.6625
Epoch 94/100
29068/29068 [==============================] - 975s 34ms/step - loss: 0.3191 - accuracy: 0.9000 - val_loss: 1.4065 - val_accuracy: 0.6659
Epoch 95/100
29068/29068 [==============================] - 975s 34ms/step - loss: 0.3273 - accuracy: 0.8946 - val_loss: 1.3793 - val_accuracy: 0.6703
Epoch 96/100
29068/29068 [==============================] - 974s 34ms/step - loss: 0.3248 - accuracy: 0.8982 - val_loss: 1.4116 - val_accuracy: 0.6594
Epoch 97/100
29068/29068 [==============================] - 976s 34ms/step - loss: 0.3187 - accuracy: 0.8988 - val_loss: 1.4078 - val_accuracy: 0.6697
Epoch 98/100
29068/29068 [==============================] - 986s 34ms/step - loss: 0.3067 - accuracy: 0.9025 - val_loss: 1.3945 - val_accuracy: 0.6641
Epoch 99/100
29068/29068 [==============================] - 976s 34ms/step - loss: 0.3085 - accuracy: 0.9024 - val_loss: 1.3889 - val_accuracy: 0.6635
Epoch 100/100
29068/29068 [==============================] - 976s 34ms/step - loss: 0.3061 - accuracy: 0.9035 - val_loss: 1.5171 - val_accuracy: 0.6663
Saved model to disk
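ReduceLROnPlateau, EarlyStopping and ModelCheckpoint are imported above but never passed to fit(), so training simply runs for the full 100 epochs even though val_loss stops improving around epoch 37. A hedged sketch of wiring them in (the file name and patience values here are illustrative, not taken from the original run):

lr_reducer = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=5, verbose=1)
early_stopper = EarlyStopping(monitor='val_loss', patience=15, restore_best_weights=True)
checkpointer = ModelCheckpoint('fer_best.h5', monitor='val_loss', save_best_only=True, verbose=1)

model.fit(np.array(X_train), np.array(y_train),
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(np.array(X_valid), np.array(y_valid)),
          shuffle=True,
          callbacks=[lr_reducer, early_stopper, checkpointer])

Similarly, load_model is imported but unused; model.save('fer_full.h5') followed by load_model('fer_full.h5') would keep the architecture and weights in a single file instead of the fer.json / fer.h5 pair.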
In [5]:
# load json and create model
from __future__ import division
from keras.models import Sequential
from keras.layers import Dense
from keras.models import model_from_json
import numpy
import os
import numpy as np

json_file = open('fer.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("fer.h5")
print("Loaded model from disk")

truey=[]
predy=[]
x = np.load('./modXtest.npy')
y = np.load('./modytest.npy')

yhat= loaded_model.predict(x)
yh = yhat.tolist()
yt = y.tolist()
count = 0

for i in range(len(y)):
    yy = max(yh[i])
    yyt = max(yt[i])
    predy.append(yh[i].index(yy))
    truey.append(yt[i].index(yyt))
    if(yh[i].index(yy)== yt[i].index(yyt)):
        count+=1

acc = (count/len(y))*100

#saving values for confusion matrix and analysis
np.save('truey', truey)
np.save('predy', predy)
print("Predicted and true label values saved")
print("Accuracy on test set :"+str(acc)+"%")
Loaded model from disk
Predicted and true label values saved
Accuracy on test set :64.7812761214823%
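The element-wise loop above is equivalent to taking the argmax of each row; a shorter sketch with numpy (same x, y and loaded_model as above) that should produce the same label indices and accuracy:

pred_idx = np.argmax(loaded_model.predict(x), axis=1)   # predicted class per test sample
true_idx = np.argmax(y, axis=1)                         # true class per test sample
acc = np.mean(pred_idx == true_idx) * 100
print("Accuracy on test set: " + str(acc) + "%")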
In [14]:
import itertools
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix

y_true = np.load('./truey.npy')
y_pred = np.load('./predy.npy')
cm = confusion_matrix(y_true, y_pred)
labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
title='Confusion matrix'
print(cm)

plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(labels))
plt.xticks(tick_marks, labels, rotation=45)
plt.yticks(tick_marks, labels)
fmt = 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
    plt.text(j, i, format(cm[i, j], fmt),
            horizontalalignment="center",
            color="white" if cm[i, j] > thresh else "black")

plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
plt.show()
[[261   6  55  28  69  14  65]
 [  9  31   4   2   3   1   2]
 [ 48   1 249  30 101  54  62]
 [ 24   0   9 759  18  14  57]
 [ 55   2  70  26 293   5 137]
 [  8   0  38  41   1 308  18]
 [ 31   1  29  51  69   6 424]]
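Row-normalizing the confusion matrix gives per-class recall, which is easier to compare than raw counts: Happy is recognized far more reliably than Fear or Sad. A small sketch using the cm and labels already defined above:

cm_norm = cm.astype('float') / cm.sum(axis=1, keepdims=True)  # each row sums to 1
for name, recall in zip(labels, np.diag(cm_norm)):
    print("%-9s recall: %.2f" % (name, recall))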
In [25]:
################################################## Angry ###################################################################

# load json and create model
from __future__ import division
from keras.models import Sequential
from keras.layers import Dense
from keras.models import model_from_json
import numpy
import os
import numpy as np
import cv2

#loading the model
json_file = open('fer.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("fer.h5")
print("Loaded model from disk")

#setting image resizing parameters
WIDTH = 48
HEIGHT = 48
x=None
y=None
labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']

#loading image
full_size_image = cv2.imread("angry.jpg")
print("Image Loaded")
gray = cv2.cvtColor(full_size_image, cv2.COLOR_BGR2GRAY)  # cv2.imread returns BGR, not RGB
face = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
faces = face.detectMultiScale(gray, 1.3, 10)

#detecting faces
for (x, y, w, h) in faces:
        roi_gray = gray[y:y + h, x:x + w]
        cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray, (48, 48)), -1), 0)
        cv2.normalize(cropped_img, cropped_img, alpha=0, beta=1, norm_type=cv2.NORM_L2, dtype=cv2.CV_32F)
        cv2.rectangle(full_size_image, (x, y), (x + w, y + h), (0, 255, 0), 1)
        #predicting the emotion
        yhat= loaded_model.predict(cropped_img)
        cv2.putText(full_size_image, labels[int(np.argmax(yhat))], (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 1, cv2.LINE_AA)
        print("Emotion: "+labels[int(np.argmax(yhat))])

cv2.imshow('Emotion', full_size_image)
cv2.waitKey() 
Loaded model from disk
Image Loaded
Emotion: Angry
Out[25]:
-1
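The following cells (Disgust through Neutral, and the camera examples) repeat exactly the same load/detect/predict steps on different input images. A hedged refactor of those steps into one helper (same fer.json, fer.h5 and Haar cascade as above; predict_emotion is a hypothetical name):

def predict_emotion(image_path, model, labels):
    # detect faces in the image, annotate each with its predicted emotion, return the annotated image
    img = cv2.imread(image_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    detector = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
    for (x, y, w, h) in detector.detectMultiScale(gray, 1.3, 10):
        crop = np.expand_dims(np.expand_dims(cv2.resize(gray[y:y + h, x:x + w], (48, 48)), -1), 0)
        cv2.normalize(crop, crop, alpha=0, beta=1, norm_type=cv2.NORM_L2, dtype=cv2.CV_32F)
        emotion = labels[int(np.argmax(model.predict(crop)))]
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 1)
        cv2.putText(img, emotion, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 1, cv2.LINE_AA)
        print("Emotion: " + emotion)
    return img

cv2.imshow('Emotion', predict_emotion("disgust.jpg", loaded_model, labels))
cv2.waitKey()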
In [26]:
################################################## Disgust ###################################################################

# load json and create model
from __future__ import division
from keras.models import Sequential
from keras.layers import Dense
from keras.models import model_from_json
import numpy
import os
import numpy as np
import cv2

#loading the model
json_file = open('fer.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("fer.h5")
print("Loaded model from disk")

#setting image resizing parameters
WIDTH = 48
HEIGHT = 48
x=None
y=None
labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']

#loading image
full_size_image = cv2.imread("disgust.jpg")
print("Image Loaded")
gray = cv2.cvtColor(full_size_image, cv2.COLOR_BGR2GRAY)
face = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
faces = face.detectMultiScale(gray, 1.3, 10)

#detecting faces
for (x, y, w, h) in faces:
        roi_gray = gray[y:y + h, x:x + w]
        cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray, (48, 48)), -1), 0)
        cv2.normalize(cropped_img, cropped_img, alpha=0, beta=1, norm_type=cv2.NORM_L2, dtype=cv2.CV_32F)
        cv2.rectangle(full_size_image, (x, y), (x + w, y + h), (0, 255, 0), 1)
        #predicting the emotion
        yhat= loaded_model.predict(cropped_img)
        cv2.putText(full_size_image, labels[int(np.argmax(yhat))], (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 1, cv2.LINE_AA)
        print("Emotion: "+labels[int(np.argmax(yhat))])

cv2.imshow('Emotion', full_size_image)
cv2.waitKey() 
Loaded model from disk
Image Loaded
Emotion: Neutral
Out[26]:
-1
In [27]:
################################################## Fear ###################################################################

# load json and create model
from __future__ import division
from keras.models import Sequential
from keras.layers import Dense
from keras.models import model_from_json
import numpy
import os
import numpy as np
import cv2

#loading the model
json_file = open('fer.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("fer.h5")
print("Loaded model from disk")

#setting image resizing parameters
WIDTH = 48
HEIGHT = 48
x=None
y=None
labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']

#loading image
full_size_image = cv2.imread("fear.jpg")
print("Image Loaded")
gray = cv2.cvtColor(full_size_image, cv2.COLOR_BGR2GRAY)
face = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
faces = face.detectMultiScale(gray, 1.3, 10)

#detecting faces
for (x, y, w, h) in faces:
        roi_gray = gray[y:y + h, x:x + w]
        cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray, (48, 48)), -1), 0)
        cv2.normalize(cropped_img, cropped_img, alpha=0, beta=1, norm_type=cv2.NORM_L2, dtype=cv2.CV_32F)
        cv2.rectangle(full_size_image, (x, y), (x + w, y + h), (0, 255, 0), 1)
        #predicting the emotion
        yhat= loaded_model.predict(cropped_img)
        cv2.putText(full_size_image, labels[int(np.argmax(yhat))], (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 1, cv2.LINE_AA)
        print("Emotion: "+labels[int(np.argmax(yhat))])

cv2.imshow('Emotion', full_size_image)
cv2.waitKey() 
Loaded model from disk
Image Loaded
Emotion: Fear
Out[27]:
-1
In [28]:
################################################## Happy ###################################################################

# load json and create model
from __future__ import division
from keras.models import Sequential
from keras.layers import Dense
from keras.models import model_from_json
import numpy
import os
import numpy as np
import cv2

#loading the model
json_file = open('fer.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("fer.h5")
print("Loaded model from disk")

#setting image resizing parameters
WIDTH = 48
HEIGHT = 48
x=None
y=None
labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']

#loading image
full_size_image = cv2.imread("happy.jpg")
print("Image Loaded")
gray = cv2.cvtColor(full_size_image, cv2.COLOR_BGR2GRAY)
face = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
faces = face.detectMultiScale(gray, 1.3, 10)

#detecting faces
for (x, y, w, h) in faces:
        roi_gray = gray[y:y + h, x:x + w]
        cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray, (48, 48)), -1), 0)
        cv2.normalize(cropped_img, cropped_img, alpha=0, beta=1, norm_type=cv2.NORM_L2, dtype=cv2.CV_32F)
        cv2.rectangle(full_size_image, (x, y), (x + w, y + h), (0, 255, 0), 1)
        #predicting the emotion
        yhat= loaded_model.predict(cropped_img)
        cv2.putText(full_size_image, labels[int(np.argmax(yhat))], (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 1, cv2.LINE_AA)
        print("Emotion: "+labels[int(np.argmax(yhat))])

cv2.imshow('Emotion', full_size_image)
cv2.waitKey() 
Loaded model from disk
Image Loaded
Emotion: Happy
Out[28]:
-1
In [29]:
################################################## Sad ###################################################################

# load json and create model
from __future__ import division
from keras.models import Sequential
from keras.layers import Dense
from keras.models import model_from_json
import numpy
import os
import numpy as np
import cv2

#loading the model
json_file = open('fer.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("fer.h5")
print("Loaded model from disk")

#setting image resizing parameters
WIDTH = 48
HEIGHT = 48
x=None
y=None
labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']

#loading image
full_size_image = cv2.imread("sad.jpg")
print("Image Loaded")
gray = cv2.cvtColor(full_size_image, cv2.COLOR_BGR2GRAY)
face = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
faces = face.detectMultiScale(gray, 1.3, 10)

#detecting faces
for (x, y, w, h) in faces:
        roi_gray = gray[y:y + h, x:x + w]
        cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray, (48, 48)), -1), 0)
        cv2.normalize(cropped_img, cropped_img, alpha=0, beta=1, norm_type=cv2.NORM_L2, dtype=cv2.CV_32F)
        cv2.rectangle(full_size_image, (x, y), (x + w, y + h), (0, 255, 0), 1)
        #predicting the emotion
        yhat= loaded_model.predict(cropped_img)
        cv2.putText(full_size_image, labels[int(np.argmax(yhat))], (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 1, cv2.LINE_AA)
        print("Emotion: "+labels[int(np.argmax(yhat))])

cv2.imshow('Emotion', full_size_image)
cv2.waitKey() 
Loaded model from disk
Image Loaded
Emotion: Sad
Out[29]:
-1
In [30]:
################################################## Surprise ###################################################################

# load json and create model
from __future__ import division
from keras.models import Sequential
from keras.layers import Dense
from keras.models import model_from_json
import numpy
import os
import numpy as np
import cv2

#loading the model
json_file = open('fer.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("fer.h5")
print("Loaded model from disk")

#setting image resizing parameters
WIDTH = 48
HEIGHT = 48
x=None
y=None
labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']

#loading image
full_size_image = cv2.imread("surprise.jpg")
print("Image Loaded")
gray = cv2.cvtColor(full_size_image, cv2.COLOR_BGR2GRAY)
face = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
faces = face.detectMultiScale(gray, 1.3, 10)

#detecting faces
for (x, y, w, h) in faces:
        roi_gray = gray[y:y + h, x:x + w]
        cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray, (48, 48)), -1), 0)
        cv2.normalize(cropped_img, cropped_img, alpha=0, beta=1, norm_type=cv2.NORM_L2, dtype=cv2.CV_32F)
        cv2.rectangle(full_size_image, (x, y), (x + w, y + h), (0, 255, 0), 1)
        #predicting the emotion
        yhat= loaded_model.predict(cropped_img)
        cv2.putText(full_size_image, labels[int(np.argmax(yhat))], (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 1, cv2.LINE_AA)
        print("Emotion: "+labels[int(np.argmax(yhat))])

cv2.imshow('Emotion', full_size_image)
cv2.waitKey() 
Loaded model from disk
Image Loaded
Emotion: Surprise
Out[30]:
-1
In [31]:
################################################## Neutral ###################################################################

# load json and create model
from __future__ import division
from keras.models import Sequential
from keras.layers import Dense
from keras.models import model_from_json
import numpy
import os
import numpy as np
import cv2

#loading the model
json_file = open('fer.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("fer.h5")
print("Loaded model from disk")

#setting image resizing parameters
WIDTH = 48
HEIGHT = 48
x=None
y=None
labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']

#loading image
full_size_image = cv2.imread("neutral.jpg")
print("Image Loaded")
gray = cv2.cvtColor(full_size_image, cv2.COLOR_BGR2GRAY)
face = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
faces = face.detectMultiScale(gray, 1.3, 10)

#detecting faces
for (x, y, w, h) in faces:
        roi_gray = gray[y:y + h, x:x + w]
        cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray, (48, 48)), -1), 0)
        cv2.normalize(cropped_img, cropped_img, alpha=0, beta=1, norm_type=cv2.NORM_L2, dtype=cv2.CV_32F)
        cv2.rectangle(full_size_image, (x, y), (x + w, y + h), (0, 255, 0), 1)
        #predicting the emotion
        yhat= loaded_model.predict(cropped_img)
        cv2.putText(full_size_image, labels[int(np.argmax(yhat))], (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 1, cv2.LINE_AA)
        print("Emotion: "+labels[int(np.argmax(yhat))])

cv2.imshow('Emotion', full_size_image)
cv2.waitKey() 
Loaded model from disk
Image Loaded
Emotion: Neutral
Out[31]:
-1
In [32]:
######################################## Camera Capture ###################################################################

# load json and create model
from __future__ import division
from keras.models import Sequential
from keras.layers import Dense
from keras.models import model_from_json
import numpy
import os
import numpy as np
import cv2

# on most laptops the default camera is device 0
capInput = cv2.VideoCapture(0)
# check whether the camera can actually be read from
if not capInput.isOpened(): print('Capture failed because of camera')

# the camera is now open, so grab a single frame;
# img holds the decoded image data for that frame
ret, img = capInput.read()
# the frame can be written to disk with cv2 (plain file I/O would also work)
cv2.imwrite('pic.jpg', img)
# and read back the same way
img = cv2.imread('pic.jpg')

# release the camera
capInput.release()


#loading the model
json_file = open('fer.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("fer.h5")
print("Loaded model from disk")

#setting image resizing parameters
WIDTH = 48
HEIGHT = 48
x=None
y=None
labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']

#loading image
full_size_image = cv2.imread("pic.jpg")
print("Image Loaded")
gray = cv2.cvtColor(full_size_image, cv2.COLOR_BGR2GRAY)
face = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
faces = face.detectMultiScale(gray, 1.3, 10)

#detecting faces
for (x, y, w, h) in faces:
        roi_gray = gray[y:y + h, x:x + w]
        cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray, (48, 48)), -1), 0)
        cv2.normalize(cropped_img, cropped_img, alpha=0, beta=1, norm_type=cv2.NORM_L2, dtype=cv2.CV_32F)
        cv2.rectangle(full_size_image, (x, y), (x + w, y + h), (0, 255, 0), 1)
        #predicting the emotion
        yhat= loaded_model.predict(cropped_img)
        cv2.putText(full_size_image, labels[int(np.argmax(yhat))], (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 1, cv2.LINE_AA)
        print("Emotion: "+labels[int(np.argmax(yhat))])

cv2.imshow('Emotion', full_size_image)
cv2.waitKey() 
Loaded model from disk
Image Loaded
Emotion: Angry
Out[32]:
-1
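One inconsistency worth noting: the training cell standardizes the whole dataset with its per-pixel mean and standard deviation, while the prediction cells run a cv2.normalize call on each crop instead. If one wanted inference preprocessing to mirror the training-time standardization, a possible sketch (assuming fdataX.npy from the first cell is still available; preprocess_like_training is a hypothetical helper, and whether it changes the predictions is not verified here):

train_x = np.load('./fdataX.npy')
px_mean = np.mean(train_x, axis=0)   # per-pixel mean over the dataset images
px_std = np.std(train_x, axis=0)     # per-pixel standard deviation

def preprocess_like_training(roi_gray):
    # resize the face crop and apply the same standardization used before training
    crop = cv2.resize(roi_gray, (48, 48)).astype('float32').reshape(1, 48, 48, 1)
    return (crop - px_mean) / px_std

Inside the detection loops, loaded_model.predict(preprocess_like_training(roi_gray)) would then replace the resize/normalize pair.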
In [33]:
################################# Camera Detector ##################################################
################################# Press Q To Stop ##################################################

# load json and create model
from __future__ import division
from keras.models import Sequential
from keras.layers import Dense
from keras.models import model_from_json
import numpy
import os
import numpy as np
import cv2
import sys
import gc
import time

labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']

if __name__ == '__main__':  
    #loading the model
    json_file = open('fer.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    model = model_from_json(loaded_model_json)
    # load weights into new model
    model.load_weights("fer.h5")
              
    #color of the rectangle drawn around detected faces
    color = (0, 255, 0)
    
    #capture the live video stream from the default camera
    cap = cv2.VideoCapture(0)
    

    
    #load the Haar cascade face detector once, outside the frame loop
    face = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")

    #loop: detect faces and classify the emotion in every frame
    while True:
        ret, frame = cap.read()   #read one video frame
        if not ret:
            continue

        #convert the frame to grayscale to reduce computational cost
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        #find faces in the frame
        faces = face.detectMultiScale(gray, 1.3, 10)

        #classify the emotion of each detected face
        for (x, y, w, h) in faces:
            roi_gray = gray[y:y + h, x:x + w]
            cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray, (48, 48)), -1), 0)
            cv2.normalize(cropped_img, cropped_img, alpha=0, beta=1, norm_type=cv2.NORM_L2, dtype=cv2.CV_32F)
            cv2.rectangle(frame, (x, y), (x + w, y + h), color, 1)
            #predicting the emotion
            yhat = model.predict(cropped_img)
            cv2.putText(frame, labels[int(np.argmax(yhat))], (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.8, color, 1, cv2.LINE_AA)
            print("Emotion: " + labels[int(np.argmax(yhat))])

        cv2.imshow("Facial Emotion Recognition", frame)
        
        #wait 10 ms for a key press
        k = cv2.waitKey(10)
        #exit the loop when 'q' is pressed
        if k & 0xFF == ord('q'):
            break

    #release the camera and destroy all windows
    cap.release()
    cv2.destroyAllWindows()
Emotion: Angry
Emotion: Angry
Emotion: Happy
Emotion: Happy
Emotion: Neutral
Emotion: Neutral
Emotion: Neutral
Emotion: Fear
Emotion: Happy
Emotion: Happy
Emotion: Happy
Emotion: Angry
Emotion: Happy
Emotion: Angry
Emotion: Fear
Emotion: Angry
Emotion: Happy
Emotion: Angry
Emotion: Angry
Emotion: Fear
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Fear
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Sad
Emotion: Sad
Emotion: Sad
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Fear
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Neutral
Emotion: Happy
Emotion: Happy
Emotion: Angry
Emotion: Happy
Emotion: Sad
Emotion: Happy
Emotion: Happy
Emotion: Angry
Emotion: Angry
Emotion: Happy
Emotion: Angry
Emotion: Fear
Emotion: Happy
Emotion: Happy
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Happy
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Happy
Emotion: Neutral
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Happy
Emotion: Angry
Emotion: Happy
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Fear
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Sad
Emotion: Sad
Emotion: Sad
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Happy
Emotion: Angry
Emotion: Angry
Emotion: Happy
Emotion: Angry
Emotion: Sad
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Sad
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Happy
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Happy
Emotion: Happy
Emotion: Happy
Emotion: Angry
Emotion: Angry
Emotion: Fear
Emotion: Angry
Emotion: Fear
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Fear
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Happy
Emotion: Happy
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Happy
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
Emotion: Angry
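The per-frame predictions above flicker between classes from one frame to the next. A simple, hedged way to steady the on-screen label is to vote over the last few predictions (window size and helper name are illustrative, not part of the original code):

from collections import deque, Counter

recent = deque(maxlen=10)   # keep the last 10 per-face predictions

def stable_label(new_label):
    # record the newest prediction and return the majority label over the window
    recent.append(new_label)
    return Counter(recent).most_common(1)[0][0]

Inside the detection loop, passing stable_label(labels[int(np.argmax(yhat))]) to cv2.putText would display the smoothed label while the raw prediction is still printed.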
In [ ]: