Similarity explanations for MNIST
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.layers import Activation, Conv2D, Dense, Dropout
from tensorflow.keras.layers import Flatten, Input, Reshape, MaxPooling2D
from tensorflow.keras.models import Model
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.losses import categorical_crossentropy
from alibi.explainers import GradientSimilarityUtils
def plot_similar(ds, expls, figsize=(20, 20)):
    """Plots original instances next to their most similar instances.

    Parameters
    ----------
    ds
        List of dictionaries containing instances to plot, labels and predictions.
        Each dict is expected to provide keys 'x', 'y', 'pred', 'y_sim', 'preds_sim'.
    expls
        Similarity explainer explanation object; ``expls.data['most_similar']``
        holds the most similar instances for each original instance.
    figsize
        Figure size.

    Returns
    -------
    None
    """
    # Grid is hard-coded to 5 originals x (1 original + 5 most similar) columns,
    # matching the notebook's setup. NOTE(review): the inner loop iterates over
    # expls.data['most_similar'].shape[0]; presumably that first axis has length
    # 5 here — confirm against the explainer output shape.
    fig, axes = plt.subplots(5, 6, figsize=figsize, sharex=False)
    for j, d in enumerate(ds):
        # Column 0: the original instance.
        axes[j, 0].imshow(np.squeeze(d['x']), cmap='gray')
        axes[j, 0].axis('off')
        if j == 0:
            title_orig = "Original instance"
            axes[j, 0].set_title(f"{title_orig} \n" +
                                 f"{len(title_orig) * '='} \n" +
                                 f"Label: {d['y']} - Prediction: {d['pred']} ")
        else:
            axes[j, 0].set_title(f"Label: {d['y']} - Prediction: {d['pred']} ")
        # Columns 1..n: the most similar instances, in similarity order.
        for i in range(expls.data['most_similar'].shape[0]):
            most_similar = np.squeeze(expls.data['most_similar'][j][i])
            axes[j, i + 1].imshow(most_similar, cmap='gray')
            # BUG FIX: original indexed axes[i, i + 1] here, hiding the axis of
            # the wrong subplot (row picked by the inner counter, not the row j).
            axes[j, i + 1].axis('off')
            if j == 0:
                title_most_sim = f"{i+1}{append_int(i+1)} most similar instance"
                axes[j, i + 1].set_title(f"{title_most_sim} \n" +
                                         f"{len(title_most_sim) * '='} \n"+
                                         f"Label: {d['y_sim'][i]} - Prediction: {d['preds_sim'][i]}")
            else:
                axes[j, i + 1].set_title(f"Label: {d['y_sim'][i]} - Prediction: {d['preds_sim'][i]}")
    plt.show()
def plot_distributions(ds, expls, figsize=(20, 20)):
    """Plots each original instance alongside its similarity-score
    distributions, averaged per true class and per predicted class.

    Parameters
    ----------
    ds
        List of dictionaries containing instances to plot, labels and predictions.
    expls
        Similarity explainer explanation object.
    figsize
        Figure size.

    Returns
    -------
    None
    """
    # One row per instance: [image, scores by true class, scores by predicted class].
    fig, axes = plt.subplots(5, 3, figsize=figsize, sharex=False)
    for row, record in enumerate(ds):
        scores_df = pd.DataFrame({'y_sim': record['y_sim'],
                                  'preds_sim': record['preds_sim'],
                                  'scores': expls.data['scores'][row]})
        # Column 0: the original instance itself.
        axes[row, 0].imshow(np.squeeze(record['x']), cmap='gray')
        axes[row, 0].axis('off')
        if row == 0:
            title_orig = "Original instance"
            axes[row, 0].set_title(f"{title_orig} \n " +
                                   f"{len(title_orig) * '='} \n" +
                                   f"Label: {record['y']} - Prediction: {record['pred']} ")
        else:
            axes[row, 0].set_title(f"Label: {record['y']} - Prediction: {record['pred']}")
        # Column 1: mean score per true class of the reference instances.
        by_true = scores_df.groupby('y_sim')['scores'].mean().sort_values(ascending=False)
        by_true.plot(kind='bar', ax=axes[row, 1])
        if row == 0:
            title_true_class = "Averaged scores for each true class in reference set"
            axes[row, 1].set_title(f"{title_true_class} \n" +
                                   f"{len(title_true_class) * '='} \n ")
        # Column 2: mean score per predicted class of the reference instances.
        by_pred = scores_df.groupby('preds_sim')['scores'].mean().sort_values(ascending=False)
        by_pred.plot(kind='bar', ax=axes[row, 2])
        if row == 0:
            title_pred_class = "Averaged scores for each predicted class in reference set"
            axes[row, 2].set_title(f"{title_pred_class} \n" +
                                   f"{len(title_pred_class) * '='} \n ")
    plt.show()
def append_int(num):
    """Returns the ordinal suffix for an integer (1 -> 'st', 2 -> 'nd', etc.).

    Parameters
    ----------
    num
        Integer number.

    Returns
    -------
    Ordinal suffix: one of 'st', 'nd', 'rd', 'th'.
    """
    # BUG FIX: the notebook export fused the markdown header "Load data" onto
    # the final return statement, producing a syntax error; restored here.
    # Teens-like numbers (11, 12, 13, 111, ...) take 'th' despite their last digit.
    if num > 9 and str(num)[-2] == '1':
        return 'th'
    return {1: 'st', 2: 'nd', 3: 'rd'}.get(num % 10, 'th')
Train model
Find similar instances
Visualizations
Most similar instances

Most similar labels distributions

Last updated
Was this helpful?

