Wine experts can identify wines from specific vineyards through smell and taste, but the factors that give different wines their individual characteristics are actually based on their chemical composition.
In this challenge, you must train a classification model to analyze the chemical and visual features of wine samples and classify them based on their cultivar (grape variety).
Citation: The data used in this exercise was originally collected by Forina, M. et al. and published in PARVUS - An Extendible Package for Data Exploration, Classification and Correlation (Institute of Pharmaceutical and Food Analysis and Technologies, Via Brigata Salerno, 16147 Genoa, Italy). It can be downloaded from the UCI Machine Learning Repository (Dua, D. and Graff, C. (2019). UCI Machine Learning Repository. Irvine, CA: University of California, School of Information and Computer Science).
Run the following cell to load a CSV file of wine data, which consists of 13 numeric features and a classification label with the following classes: 0 (Variety A), 1 (Variety B), and 2 (Variety C).
import pandas as pd

# Load the training dataset
data = pd.read_csv('data/wine.csv')

# Preview a random sample of 10 rows
sample = data.sample(10)
print(sample)

# Show the shape (rows, columns), then display summary statistics
print(data.shape)
data.describe()
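It's also worth checking how the samples are distributed across the three classes; the label column is named WineVariety, as used later in this notebook:

# Count the number of samples in each class (0, 1, 2)
data['WineVariety'].value_counts()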
Your challenge is to explore the data and train a classification model that achieves an overall Recall metric of over 0.95.
Add markdown and code cells as required to explore the data, train a model, and evaluate the model's predictive performance.
# Separate features and labels
features = ['Alcohol', 'Malic_acid', 'Ash', 'Alcalinity', 'Magnesium', 'Phenols', 'Flavanoids',
'Nonflavanoids', 'Proanthocyanins', 'Color_intensity', 'Hue', 'OD280_315_of_diluted_wines',
'Proline']
label = 'WineVariety'
X, y = data[features].values, data[label].values
for n in range(0,4):
    print("Wine", str(n+1), "\n Features:", list(X[n]), "\n Label:", y[n])
# Count the number of null values for each column
data.isnull().sum()
from matplotlib import pyplot as plt
%matplotlib inline
features = ['Alcohol', 'Malic_acid', 'Ash', 'Alcalinity', 'Magnesium', 'Phenols', 'Flavanoids',
'Nonflavanoids', 'Proanthocyanins', 'Color_intensity', 'Hue', 'OD280_315_of_diluted_wines',
'Proline']
for col in features:
    data.boxplot(column=col, by='WineVariety', figsize=(6,6))
    plt.title(col)
plt.show()
from sklearn.model_selection import train_test_split
# Split data 70%-30% into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0)
print ('Training cases: %d\nTest cases: %d' % (X_train.shape[0], X_test.shape[0]))
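Note that with a purely random split, the class proportions in the training and test sets can drift from those in the full dataset. If you want each split to preserve the overall class balance, train_test_split accepts a stratify argument; this optional variant is a sketch, not part of the original exercise:

# Optional alternative: stratify on the labels so both sets keep the class balance
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30,
                                                    random_state=0, stratify=y)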
# Train the model
from sklearn.linear_model import LogisticRegression
# Set regularization rate
reg = 0.01
# train a logistic regression model on the training set
model = LogisticRegression(C=1/reg, solver="liblinear").fit(X_train, y_train)
print (model)
predictions = model.predict(X_test)
print('Predicted labels: ', predictions)
print('Actual labels: ', y_test)
from sklearn.metrics import classification_report
print(classification_report(y_test, predictions))
from sklearn.metrics import accuracy_score, precision_score, recall_score
print("Overall Accuracy:",accuracy_score(y_test, predictions))
print("Overall Precision:",precision_score(y_test, predictions, average='macro'))
print("Overall Recall:",recall_score(y_test, predictions, average='macro'))
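Because the challenge targets overall recall, it can also help to confirm that no single class drags the average down. Passing average=None to recall_score returns one score per class; a small sketch:

# Recall for each class individually (order follows the sorted class labels 0, 1, 2)
print("Per-class Recall:", recall_score(y_test, predictions, average=None))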
from sklearn.metrics import confusion_matrix
# Print the confusion matrix
mcm = confusion_matrix(y_test, predictions)
print (mcm)
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
plt.imshow(mcm, interpolation="nearest", cmap=plt.cm.Blues)
plt.colorbar()
classes = ['Variety A', 'Variety B', 'Variety C']
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
plt.xlabel("Predicted Variety")
plt.ylabel("Actual Variety")
plt.show()
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
# Get class probability scores
prob = model.predict_proba(X_test)
# Get ROC metrics for each class
fpr = {}
tpr = {}
thresh ={}
for i in range(len(classes)):
    fpr[i], tpr[i], thresh[i] = roc_curve(y_test, prob[:,i], pos_label=i)
# Plot the ROC chart
plt.plot(fpr[0], tpr[0], linestyle='--',color='orange', label=classes[0] + ' vs Rest')
plt.plot(fpr[1], tpr[1], linestyle='--',color='green', label=classes[1] + ' vs Rest')
plt.plot(fpr[2], tpr[2], linestyle='--',color='blue', label=classes[2] + ' vs Rest')
plt.title('Multiclass ROC curve')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend(loc='best')
plt.show()
auc = roc_auc_score(y_test, prob, multi_class='ovr')
print('Average AUC:', auc)
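By default, roc_auc_score macro-averages the one-vs-rest AUC across the classes. If the class sizes were noticeably unequal, a weighted average can be more representative; this is an optional check, not required by the exercise:

# Weight each class's AUC by its number of true instances
print('Weighted AUC:', roc_auc_score(y_test, prob, multi_class='ovr', average='weighted'))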
from sklearn.preprocessing import StandardScaler
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
# Define preprocessing for numeric columns (scale them)
feature_columns = [0,1,2,3,4,5,6,7,8,9,10,11,12]
feature_transformer = Pipeline(steps=[
    ('scaler', StandardScaler())
])
# Create preprocessing steps
preprocessor = ColumnTransformer(
    transformers=[
        ('preprocess', feature_transformer, feature_columns)])
# Create training pipeline
pipeline = Pipeline(steps=[('preprocessor', preprocessor),
                           ('classifier', SVC(probability=True))])
# Fit the pipeline to train a support vector machine classifier on the training set
scaledsvm_model = pipeline.fit(X_train, y_train)
print (scaledsvm_model)
# Get predictions from test data
svm_predictions = scaledsvm_model.predict(X_test)
svm_prob = scaledsvm_model.predict_proba(X_test)
# Overall metrics
print("Overall Accuracy:",accuracy_score(y_test, svm_predictions))
print("Overall Precision:",precision_score(y_test, svm_predictions, average='macro'))
print("Overall Recall:",recall_score(y_test, svm_predictions, average='macro'))
print('Average AUC:', roc_auc_score(y_test,svm_prob, multi_class='ovr'))
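If the recall target of 0.95 isn't met on the first attempt, one optional approach is to tune the SVC hyperparameters with a small grid search over the same pipeline. This is a minimal sketch; the grid values below are illustrative assumptions, not values prescribed by the exercise:

# Optional: tune the SVC inside the pipeline with a small, illustrative grid
from sklearn.model_selection import GridSearchCV

param_grid = {
    'classifier__C': [0.1, 1, 10],          # regularization strength (illustrative values)
    'classifier__gamma': ['scale', 'auto']  # RBF kernel coefficient
}
search = GridSearchCV(pipeline, param_grid, cv=5, scoring='recall_macro')
search.fit(X_train, y_train)
print('Best parameters:', search.best_params_)
print('Best cross-validated recall:', search.best_score_)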
# Confusion matrix for the SVM model (not the earlier logistic regression one)
svm_mcm = confusion_matrix(y_test, svm_predictions)
plt.imshow(svm_mcm, interpolation="nearest", cmap=plt.cm.Blues)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
plt.xlabel("Predicted Variety")
plt.ylabel("Actual Variety")
plt.show()
from sklearn.metrics import roc_curve, roc_auc_score
# Get class probability scores from the scaled SVM pipeline
probabilities = scaledsvm_model.predict_proba(X_test)
auc = roc_auc_score(y_test,probabilities, multi_class='ovr')
print('Average AUC:', auc)
# Get ROC metrics for each class
fpr = {}
tpr = {}
thresh ={}
for i in range(len(classes)):
    fpr[i], tpr[i], thresh[i] = roc_curve(y_test, probabilities[:,i], pos_label=i)
# Plot the ROC chart
plt.plot(fpr[0], tpr[0], linestyle='--',color='orange', label=classes[0] + ' vs Rest')
plt.plot(fpr[1], tpr[1], linestyle='--',color='green', label=classes[1] + ' vs Rest')
plt.plot(fpr[2], tpr[2], linestyle='--',color='blue', label=classes[2] + ' vs Rest')
plt.title('Multiclass ROC curve')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend(loc='best')
plt.show()
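Before deciding whether the SVM meets the recall target, you can print the same per-class report used for the logistic regression model:

# Per-class precision, recall, and F1 scores for the SVM pipeline
print(classification_report(y_test, svm_predictions))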
When you're happy with your model's predictive performance, save it and then use it to predict classes for the following two new wine samples:
import joblib
# Save the model as a pickle file
filename = './svm_model.pkl'
joblib.dump(scaledsvm_model, filename)
# Load the model from the file
svm_model = joblib.load(filename)
# This time our input is an array of two feature arrays
x_new = np.array([[13.72,1.43,2.5,16.7,108,3.4,3.67,0.19,2.04,6.8,0.89,2.87,1285],
[12.37,0.94,1.36,10.6,88,1.98,0.57,0.28,0.42,1.95,1.05,1.82,520]])
print ('New samples:\n{}'.format(x_new))
# Use the loaded model to predict classes for the new samples
predictions = svm_model.predict(x_new)
# Get the predicted classes.
for prediction in predictions:
    print(prediction, '(' + classes[prediction] + ')')
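To see how confident the model is about each prediction, the loaded pipeline also exposes predict_proba (available because the SVC was created with probability=True):

# Show each new sample's predicted probability for every class
probs = svm_model.predict_proba(x_new)
for i, p in enumerate(probs):
    print('Sample', i + 1, 'class probabilities:', np.round(p, 3))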