Ghemri Dataset

# Perceptron predictions on the Ghemri dataset
from random import seed
from csv import reader

# Load a CSV file
def load_csv(filename):
    dataset = list()
    with open(filename, 'r', encoding='utf-8-sig') as file:
        csv_reader = reader(file)
        for row in csv_reader:
            if not row:
                continue
            dataset.append(row)
    return dataset

# Convert string column to float
def str_column_to_float(dataset, column):
    for row in dataset:
        row[column] = float(row[column].strip())

# Convert string column to integer
def str_column_to_int(dataset, column):
    class_values = [row[column] for row in dataset]
    unique = set(class_values)
    lookup = dict()
    for i, value in enumerate(unique):
        lookup[value] = i
    for row in dataset:
        row[column] = lookup[row[column]]
    return lookup

# Make a prediction with weights
def predict(row, weights):
    activation = weights[0]
    for i in range(len(row)-1):
        activation += weights[i + 1] * row[i]
    return 1.0 if activation >= 0.0 else 0.0

# Make predictions on the Ghemri dataset with fixed weights
seed(1)
# load and prepare data
filename = 'ghemri.csv'
dataset = load_csv(filename)
for i in range(len(dataset[0])-1):
    str_column_to_float(dataset, i)
# convert string class to integers
str_column_to_int(dataset, len(dataset[0])-1)
## {'0': 0, '1': 1}
# bias weight first, then one weight per input feature
weights = [1.0, -0.314, 0.234, -0.21]
for row in dataset:
    prediction = predict(row, weights)
    print(prediction)
## 0.0
## 0.0
## 0.0
## 0.0
## 1.0
## 0.0
## 0.0
## 0.0
## 0.0
## 0.0
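
For clarity, the activation computed by predict is the bias weight plus the dot product of the remaining weights with the row's features. The short check below walks through that arithmetic on a single hypothetical three-feature row (the values are made up for illustration, not taken from ghemri.csv):

# hypothetical row: three features followed by a class label (predict ignores the label)
row = [0.5, -1.2, 0.8, 1]
weights = [1.0, -0.314, 0.234, -0.21]  # bias first, then one weight per feature
activation = weights[0] + sum(w * x for w, x in zip(weights[1:], row[:-1]))
print(activation)  # 1.0 - 0.157 - 0.2808 - 0.168 = 0.3942
print(1.0 if activation >= 0.0 else 0.0)  # activation >= 0, so the prediction is 1.0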

Part 2

Run and study the code in Listing 10.8. Afterwards, tune the parameters (or hyperparameters) to increase the accuracy of the models. Upload the model with the highest accuracy.

Running Listing 10.8 with the original parameters from the textbook:

# Perceptron Algorithm on the Sonar Dataset
from random import seed
from random import randrange
from csv import reader

# Load a CSV file
def load_csv(filename):
    dataset = list()
    with open(filename, 'r') as file:
        csv_reader = reader(file)
        for row in csv_reader:
            if not row:
                continue
            dataset.append(row)
    return dataset

# Convert string column to float
def str_column_to_float(dataset, column):
    for row in dataset:
        row[column] = float(row[column].strip())

# Convert string column to integer
def str_column_to_int(dataset, column):
    class_values = [row[column] for row in dataset]
    unique = set(class_values)
    lookup = dict()
    for i, value in enumerate(unique):
        lookup[value] = i
    for row in dataset:
        row[column] = lookup[row[column]]
    return lookup

# Split a dataset into k folds
def cross_validation_split(dataset, n_folds):
    dataset_split = list()
    dataset_copy = list(dataset)
    fold_size = int(len(dataset) / n_folds)  # leftover rows (len(dataset) % n_folds) are dropped
    for _ in range(n_folds):
        fold = list()
        while len(fold) < fold_size:
            index = randrange(len(dataset_copy))
            fold.append(dataset_copy.pop(index))
        dataset_split.append(fold)
    return dataset_split

# Calculate accuracy percentage
def accuracy_metric(actual, predicted):
    correct = 0
    for i in range(len(actual)):
        if actual[i] == predicted[i]:
            correct += 1
    return correct / float(len(actual)) * 100.0

# Evaluate an algorithm using a cross validation split
def evaluate_algorithm(dataset, algorithm, n_folds, *args):
    folds = cross_validation_split(dataset, n_folds)
    scores = list()
    for fold in folds:
        train_set = list(folds)
        train_set.remove(fold)
        train_set = sum(train_set, [])  # flatten the remaining folds into one training set
        test_set = list()
        for row in fold:
            row_copy = list(row)
            test_set.append(row_copy)
            row_copy[-1] = None  # hide the class label from the test rows
        predicted = algorithm(train_set, test_set, *args)
        actual = [row[-1] for row in fold]
        accuracy = accuracy_metric(actual, predicted)
        scores.append(accuracy)
    return scores

# Make a prediction with weights
def predict(row, weights):
    activation = weights[0]
    for i in range(len(row)-1):
        activation += weights[i + 1] * row[i]
    return 1.0 if activation >= 0.0 else 0.0

# Estimate Perceptron weights using stochastic gradient descent
def train_weights(train, l_rate, n_epoch):
    weights = [0.0 for i in range(len(train[0]))]
    for _ in range(n_epoch):
        for row in train:
            prediction = predict(row, weights)
            error = row[-1] - prediction
            weights[0] = weights[0] + l_rate * error
            for i in range(len(row)-1):
                weights[i + 1] = weights[i + 1] + l_rate * error * row[i]
    return weights

# Perceptron Algorithm With Stochastic Gradient Descent
def perceptron(train, test, l_rate, n_epoch):
    predictions = list()
    weights = train_weights(train, l_rate, n_epoch)
    for row in test:
        prediction = predict(row, weights)
        predictions.append(prediction)
    return predictions

# Test the Perceptron algorithm on the sonar dataset
seed(1)
# load and prepare data
filename = 'sonar.all-data.csv'
dataset = load_csv(filename)
for i in range(len(dataset[0])-1):
    str_column_to_float(dataset, i)
# convert string class to integers
str_column_to_int(dataset, len(dataset[0])-1)
## {'R': 0, 'M': 1}
# evaluate algorithm
n_folds = 3
l_rate = 0.01
n_epoch = 500
scores = evaluate_algorithm(dataset, perceptron, n_folds, l_rate, n_epoch)
print('Scores: %s' % scores)
## Scores: [81.15942028985508, 69.56521739130434, 62.31884057971014]
print('Mean Accuracy: %.3f%%' % (sum(scores)/float(len(scores))))
## Mean Accuracy: 71.014%
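
Before tuning, it helps to see what train_weights is doing: after every row it applies the perceptron update weights[i] = weights[i] + l_rate * error * row[i] (with input 1 for the bias), so the weights only move on misclassified rows. A minimal sketch on a tiny, hypothetical linearly separable dataset (two features plus a class label; not part of the sonar data) shows the learned weights separating the two classes:

# hypothetical dataset: [x1, x2, class]
tiny = [[1.0, 1.0, 1], [2.0, 1.5, 1], [-1.0, -1.0, 0], [-2.0, -0.5, 0]]
w = train_weights(tiny, l_rate=0.1, n_epoch=10)
print(w)  # [bias, w1, w2]; training is deterministic here, giving [-0.1, 0.1, 0.1]
print([predict(row, w) for row in tiny])  # [1.0, 1.0, 0.0, 0.0]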

Tuning the parameters to maximize the accuracy. The imports and function definitions are unchanged from the listing above; only the driver code is replaced by a grid search over n_folds, l_rate, and n_epoch:

# Test the Perceptron algorithm on the sonar dataset
seed(1)
# load and prepare data
filename = 'sonar.all-data.csv'
dataset = load_csv(filename)
for i in range(len(dataset[0])-1):
    str_column_to_float(dataset, i)
# convert string class to integers
str_column_to_int(dataset, len(dataset[0])-1)
## {'R': 0, 'M': 1}
# evaluate the algorithm over a grid of hyperparameter values
for n_folds in range(3, 10):
    for l_rate in [.05, .01, .005, .001]:
        for n_epoch in [100, 500, 1000, 2000]:
            scores = evaluate_algorithm(dataset, perceptron, n_folds, l_rate, n_epoch)
            print('Scores: %s' % scores)
            print('Mean Accuracy for n_folds: %d, l_rate: %f, and n_epoch:%d is %.3f%%' % (n_folds, l_rate, n_epoch, sum(scores)/float(len(scores))))
## Scores: [82.6086956521739, 66.66666666666666, 56.52173913043478]
## Mean Accuracy for n_folds: 3, l_rate: 0.050000, and n_epoch:100 is 68.599%
## Scores: [71.01449275362319, 66.66666666666666, 65.21739130434783]
## Mean Accuracy for n_folds: 3, l_rate: 0.050000, and n_epoch:500 is 67.633%
## Scores: [71.01449275362319, 78.26086956521739, 81.15942028985508]
## Mean Accuracy for n_folds: 3, l_rate: 0.050000, and n_epoch:1000 is 76.812%
## Scores: [69.56521739130434, 78.26086956521739, 79.71014492753623]
## Mean Accuracy for n_folds: 3, l_rate: 0.050000, and n_epoch:2000 is 75.845%
## Scores: [65.21739130434783, 75.36231884057972, 75.36231884057972]
## Mean Accuracy for n_folds: 3, l_rate: 0.010000, and n_epoch:100 is 71.981%
## Scores: [73.91304347826086, 68.11594202898551, 75.36231884057972]
## Mean Accuracy for n_folds: 3, l_rate: 0.010000, and n_epoch:500 is 72.464%
## Scores: [62.31884057971014, 72.46376811594203, 72.46376811594203]
## Mean Accuracy for n_folds: 3, l_rate: 0.010000, and n_epoch:1000 is 69.082%
## Scores: [66.66666666666666, 63.76811594202898, 78.26086956521739]
## Mean Accuracy for n_folds: 3, l_rate: 0.010000, and n_epoch:2000 is 69.565%
## Scores: [69.56521739130434, 73.91304347826086, 71.01449275362319]
## Mean Accuracy for n_folds: 3, l_rate: 0.005000, and n_epoch:100 is 71.498%
## Scores: [71.01449275362319, 76.81159420289855, 78.26086956521739]
## Mean Accuracy for n_folds: 3, l_rate: 0.005000, and n_epoch:500 is 75.362%
## Scores: [66.66666666666666, 69.56521739130434, 75.36231884057972]
## Mean Accuracy for n_folds: 3, l_rate: 0.005000, and n_epoch:1000 is 70.531%
## Scores: [78.26086956521739, 82.6086956521739, 82.6086956521739]
## Mean Accuracy for n_folds: 3, l_rate: 0.005000, and n_epoch:2000 is 81.159%
## Scores: [62.31884057971014, 60.86956521739131, 68.11594202898551]
## Mean Accuracy for n_folds: 3, l_rate: 0.001000, and n_epoch:100 is 63.768%
## Scores: [65.21739130434783, 79.71014492753623, 72.46376811594203]
## Mean Accuracy for n_folds: 3, l_rate: 0.001000, and n_epoch:500 is 72.464%
## Scores: [69.56521739130434, 65.21739130434783, 71.01449275362319]
## Mean Accuracy for n_folds: 3, l_rate: 0.001000, and n_epoch:1000 is 68.599%
## Scores: [69.56521739130434, 72.46376811594203, 71.01449275362319]
## Mean Accuracy for n_folds: 3, l_rate: 0.001000, and n_epoch:2000 is 71.014%
## Scores: [71.15384615384616, 69.23076923076923, 67.3076923076923, 71.15384615384616]
## Mean Accuracy for n_folds: 4, l_rate: 0.050000, and n_epoch:100 is 69.712%
## Scores: [73.07692307692307, 57.692307692307686, 76.92307692307693, 73.07692307692307]
## Mean Accuracy for n_folds: 4, l_rate: 0.050000, and n_epoch:500 is 70.192%
## Scores: [75.0, 82.6923076923077, 80.76923076923077, 69.23076923076923]
## Mean Accuracy for n_folds: 4, l_rate: 0.050000, and n_epoch:1000 is 76.923%
## Scores: [73.07692307692307, 75.0, 76.92307692307693, 69.23076923076923]
## Mean Accuracy for n_folds: 4, l_rate: 0.050000, and n_epoch:2000 is 73.558%
## Scores: [63.46153846153846, 67.3076923076923, 73.07692307692307, 65.38461538461539]
## Mean Accuracy for n_folds: 4, l_rate: 0.010000, and n_epoch:100 is 67.308%
## Scores: [75.0, 78.84615384615384, 69.23076923076923, 73.07692307692307]
## Mean Accuracy for n_folds: 4, l_rate: 0.010000, and n_epoch:500 is 74.038%
## Scores: [73.07692307692307, 76.92307692307693, 71.15384615384616, 76.92307692307693]
## Mean Accuracy for n_folds: 4, l_rate: 0.010000, and n_epoch:1000 is 74.519%
## Scores: [82.6923076923077, 69.23076923076923, 76.92307692307693, 65.38461538461539]
## Mean Accuracy for n_folds: 4, l_rate: 0.010000, and n_epoch:2000 is 73.558%
## Scores: [75.0, 69.23076923076923, 59.61538461538461, 59.61538461538461]
## Mean Accuracy for n_folds: 4, l_rate: 0.005000, and n_epoch:100 is 65.865%
## Scores: [71.15384615384616, 69.23076923076923, 71.15384615384616, 78.84615384615384]
## Mean Accuracy for n_folds: 4, l_rate: 0.005000, and n_epoch:500 is 72.596%
## Scores: [80.76923076923077, 67.3076923076923, 69.23076923076923, 65.38461538461539]
## Mean Accuracy for n_folds: 4, l_rate: 0.005000, and n_epoch:1000 is 70.673%
## Scores: [61.53846153846154, 71.15384615384616, 61.53846153846154, 67.3076923076923]
## Mean Accuracy for n_folds: 4, l_rate: 0.005000, and n_epoch:2000 is 65.385%
## Scores: [65.38461538461539, 80.76923076923077, 76.92307692307693, 67.3076923076923]
## Mean Accuracy for n_folds: 4, l_rate: 0.001000, and n_epoch:100 is 72.596%
## Scores: [73.07692307692307, 73.07692307692307, 71.15384615384616, 73.07692307692307]
## Mean Accuracy for n_folds: 4, l_rate: 0.001000, and n_epoch:500 is 72.596%
## Scores: [80.76923076923077, 80.76923076923077, 65.38461538461539, 71.15384615384616]
## Mean Accuracy for n_folds: 4, l_rate: 0.001000, and n_epoch:1000 is 74.519%
## Scores: [76.92307692307693, 69.23076923076923, 76.92307692307693, 73.07692307692307]
## Mean Accuracy for n_folds: 4, l_rate: 0.001000, and n_epoch:2000 is 74.038%
## Scores: [70.73170731707317, 80.48780487804879, 68.29268292682927, 63.41463414634146, 73.17073170731707]
## Mean Accuracy for n_folds: 5, l_rate: 0.050000, and n_epoch:100 is 71.220%
## Scores: [73.17073170731707, 75.60975609756098, 68.29268292682927, 75.60975609756098, 60.97560975609756]
## Mean Accuracy for n_folds: 5, l_rate: 0.050000, and n_epoch:500 is 70.732%
## Scores: [65.85365853658537, 75.60975609756098, 73.17073170731707, 78.04878048780488, 75.60975609756098]
## Mean Accuracy for n_folds: 5, l_rate: 0.050000, and n_epoch:1000 is 73.659%
## Scores: [68.29268292682927, 70.73170731707317, 85.36585365853658, 78.04878048780488, 63.41463414634146]
## Mean Accuracy for n_folds: 5, l_rate: 0.050000, and n_epoch:2000 is 73.171%
## Scores: [78.04878048780488, 78.04878048780488, 78.04878048780488, 68.29268292682927, 68.29268292682927]
## Mean Accuracy for n_folds: 5, l_rate: 0.010000, and n_epoch:100 is 74.146%
## Scores: [68.29268292682927, 73.17073170731707, 73.17073170731707, 73.17073170731707, 73.17073170731707]
## Mean Accuracy for n_folds: 5, l_rate: 0.010000, and n_epoch:500 is 72.195%
## Scores: [80.48780487804879, 78.04878048780488, 73.17073170731707, 82.92682926829268, 73.17073170731707]
## Mean Accuracy for n_folds: 5, l_rate: 0.010000, and n_epoch:1000 is 77.561%
## Scores: [70.73170731707317, 58.536585365853654, 63.41463414634146, 82.92682926829268, 65.85365853658537]
## Mean Accuracy for n_folds: 5, l_rate: 0.010000, and n_epoch:2000 is 68.293%
## Scores: [70.73170731707317, 78.04878048780488, 63.41463414634146, 70.73170731707317, 78.04878048780488]
## Mean Accuracy for n_folds: 5, l_rate: 0.005000, and n_epoch:100 is 72.195%
## Scores: [48.78048780487805, 70.73170731707317, 82.92682926829268, 60.97560975609756, 75.60975609756098]
## Mean Accuracy for n_folds: 5, l_rate: 0.005000, and n_epoch:500 is 67.805%
## Scores: [75.60975609756098, 78.04878048780488, 65.85365853658537, 70.73170731707317, 73.17073170731707]
## Mean Accuracy for n_folds: 5, l_rate: 0.005000, and n_epoch:1000 is 72.683%
## Scores: [73.17073170731707, 70.73170731707317, 70.73170731707317, 65.85365853658537, 65.85365853658537]
## Mean Accuracy for n_folds: 5, l_rate: 0.005000, and n_epoch:2000 is 69.268%
## Scores: [56.09756097560976, 73.17073170731707, 68.29268292682927, 78.04878048780488, 80.48780487804879]
## Mean Accuracy for n_folds: 5, l_rate: 0.001000, and n_epoch:100 is 71.220%
## Scores: [80.48780487804879, 73.17073170731707, 82.92682926829268, 58.536585365853654, 63.41463414634146]
## Mean Accuracy for n_folds: 5, l_rate: 0.001000, and n_epoch:500 is 71.707%
## Scores: [85.36585365853658, 75.60975609756098, 68.29268292682927, 78.04878048780488, 58.536585365853654]
## Mean Accuracy for n_folds: 5, l_rate: 0.001000, and n_epoch:1000 is 73.171%
## Scores: [68.29268292682927, 73.17073170731707, 73.17073170731707, 85.36585365853658, 70.73170731707317]
## Mean Accuracy for n_folds: 5, l_rate: 0.001000, and n_epoch:2000 is 74.146%
## Scores: [73.52941176470588, 76.47058823529412, 70.58823529411765, 58.82352941176471, 73.52941176470588, 82.35294117647058]
## Mean Accuracy for n_folds: 6, l_rate: 0.050000, and n_epoch:100 is 72.549%
## Scores: [64.70588235294117, 82.35294117647058, 73.52941176470588, 73.52941176470588, 76.47058823529412, 70.58823529411765]
## Mean Accuracy for n_folds: 6, l_rate: 0.050000, and n_epoch:500 is 73.529%
## Scores: [76.47058823529412, 70.58823529411765, 61.76470588235294, 70.58823529411765, 79.41176470588235, 67.64705882352942]
## Mean Accuracy for n_folds: 6, l_rate: 0.050000, and n_epoch:1000 is 71.078%
## Scores: [67.64705882352942, 82.35294117647058, 76.47058823529412, 70.58823529411765, 76.47058823529412, 67.64705882352942]
## Mean Accuracy for n_folds: 6, l_rate: 0.050000, and n_epoch:2000 is 73.529%
## Scores: [50.0, 67.64705882352942, 79.41176470588235, 61.76470588235294, 64.70588235294117, 79.41176470588235]
## Mean Accuracy for n_folds: 6, l_rate: 0.010000, and n_epoch:100 is 67.157%
## Scores: [70.58823529411765, 73.52941176470588, 79.41176470588235, 70.58823529411765, 73.52941176470588, 64.70588235294117]
## Mean Accuracy for n_folds: 6, l_rate: 0.010000, and n_epoch:500 is 72.059%
## Scores: [76.47058823529412, 70.58823529411765, 85.29411764705883, 70.58823529411765, 70.58823529411765, 79.41176470588235]
## Mean Accuracy for n_folds: 6, l_rate: 0.010000, and n_epoch:1000 is 75.490%
## Scores: [67.64705882352942, 67.64705882352942, 79.41176470588235, 73.52941176470588, 73.52941176470588, 79.41176470588235]
## Mean Accuracy for n_folds: 6, l_rate: 0.010000, and n_epoch:2000 is 73.529%
## Scores: [47.05882352941176, 58.82352941176471, 58.82352941176471, 79.41176470588235, 85.29411764705883, 76.47058823529412]
## Mean Accuracy for n_folds: 6, l_rate: 0.005000, and n_epoch:100 is 67.647%
## Scores: [73.52941176470588, 73.52941176470588, 79.41176470588235, 76.47058823529412, 76.47058823529412, 76.47058823529412]
## Mean Accuracy for n_folds: 6, l_rate: 0.005000, and n_epoch:500 is 75.980%
## Scores: [73.52941176470588, 85.29411764705883, 76.47058823529412, 79.41176470588235, 67.64705882352942, 61.76470588235294]
## Mean Accuracy for n_folds: 6, l_rate: 0.005000, and n_epoch:1000 is 74.020%
## Scores: [58.82352941176471, 73.52941176470588, 61.76470588235294, 70.58823529411765, 76.47058823529412, 79.41176470588235]
## Mean Accuracy for n_folds: 6, l_rate: 0.005000, and n_epoch:2000 is 70.098%
## Scores: [73.52941176470588, 64.70588235294117, 79.41176470588235, 76.47058823529412, 61.76470588235294, 61.76470588235294]
## Mean Accuracy for n_folds: 6, l_rate: 0.001000, and n_epoch:100 is 69.608%
## Scores: [73.52941176470588, 79.41176470588235, 70.58823529411765, 73.52941176470588, 70.58823529411765, 67.64705882352942]
## Mean Accuracy for n_folds: 6, l_rate: 0.001000, and n_epoch:500 is 72.549%
## Scores: [73.52941176470588, 85.29411764705883, 67.64705882352942, 82.35294117647058, 70.58823529411765, 70.58823529411765]
## Mean Accuracy for n_folds: 6, l_rate: 0.001000, and n_epoch:1000 is 75.000%
## Scores: [76.47058823529412, 82.35294117647058, 73.52941176470588, 70.58823529411765, 76.47058823529412, 67.64705882352942]
## Mean Accuracy for n_folds: 6, l_rate: 0.001000, and n_epoch:2000 is 74.510%
## Scores: [86.20689655172413, 75.86206896551724, 65.51724137931035, 68.96551724137932, 89.65517241379311, 75.86206896551724, 79.3103448275862]
## Mean Accuracy for n_folds: 7, l_rate: 0.050000, and n_epoch:100 is 77.340%
## Scores: [79.3103448275862, 65.51724137931035, 62.06896551724138, 75.86206896551724, 75.86206896551724, 68.96551724137932, 68.96551724137932]
## Mean Accuracy for n_folds: 7, l_rate: 0.050000, and n_epoch:500 is 70.936%
## Scores: [75.86206896551724, 82.75862068965517, 89.65517241379311, 72.41379310344827, 75.86206896551724, 65.51724137931035, 65.51724137931035]
## Mean Accuracy for n_folds: 7, l_rate: 0.050000, and n_epoch:1000 is 75.369%
## Scores: [79.3103448275862, 68.96551724137932, 86.20689655172413, 62.06896551724138, 68.96551724137932, 65.51724137931035, 51.724137931034484]
## Mean Accuracy for n_folds: 7, l_rate: 0.050000, and n_epoch:2000 is 68.966%
## Scores: [86.20689655172413, 86.20689655172413, 82.75862068965517, 75.86206896551724, 65.51724137931035, 82.75862068965517, 72.41379310344827]
## Mean Accuracy for n_folds: 7, l_rate: 0.010000, and n_epoch:100 is 78.818%
## Scores: [65.51724137931035, 68.96551724137932, 65.51724137931035, 82.75862068965517, 75.86206896551724, 79.3103448275862, 62.06896551724138]
## Mean Accuracy for n_folds: 7, l_rate: 0.010000, and n_epoch:500 is 71.429%
## Scores: [79.3103448275862, 72.41379310344827, 72.41379310344827, 79.3103448275862, 62.06896551724138, 79.3103448275862, 58.620689655172406]
## Mean Accuracy for n_folds: 7, l_rate: 0.010000, and n_epoch:1000 is 71.921%
## Scores: [75.86206896551724, 72.41379310344827, 75.86206896551724, 79.3103448275862, 65.51724137931035, 65.51724137931035, 79.3103448275862]
## Mean Accuracy for n_folds: 7, l_rate: 0.010000, and n_epoch:2000 is 73.399%
## Scores: [65.51724137931035, 62.06896551724138, 93.10344827586206, 65.51724137931035, 48.275862068965516, 82.75862068965517, 58.620689655172406]
## Mean Accuracy for n_folds: 7, l_rate: 0.005000, and n_epoch:100 is 67.980%
## Scores: [86.20689655172413, 75.86206896551724, 75.86206896551724, 65.51724137931035, 68.96551724137932, 79.3103448275862, 62.06896551724138]
## Mean Accuracy for n_folds: 7, l_rate: 0.005000, and n_epoch:500 is 73.399%
## Scores: [68.96551724137932, 58.620689655172406, 62.06896551724138, 82.75862068965517, 79.3103448275862, 72.41379310344827, 86.20689655172413]
## Mean Accuracy for n_folds: 7, l_rate: 0.005000, and n_epoch:1000 is 72.906%
## Scores: [79.3103448275862, 62.06896551724138, 65.51724137931035, 72.41379310344827, 68.96551724137932, 72.41379310344827, 79.3103448275862]
## Mean Accuracy for n_folds: 7, l_rate: 0.005000, and n_epoch:2000 is 71.429%
## Scores: [62.06896551724138, 65.51724137931035, 68.96551724137932, 82.75862068965517, 68.96551724137932, 68.96551724137932, 68.96551724137932]
## Mean Accuracy for n_folds: 7, l_rate: 0.001000, and n_epoch:100 is 69.458%
## Scores: [62.06896551724138, 55.172413793103445, 58.620689655172406, 79.3103448275862, 86.20689655172413, 68.96551724137932, 72.41379310344827]
## Mean Accuracy for n_folds: 7, l_rate: 0.001000, and n_epoch:500 is 68.966%
## Scores: [68.96551724137932, 68.96551724137932, 75.86206896551724, 58.620689655172406, 65.51724137931035, 75.86206896551724, 62.06896551724138]
## Mean Accuracy for n_folds: 7, l_rate: 0.001000, and n_epoch:1000 is 67.980%
## Scores: [79.3103448275862, 86.20689655172413, 55.172413793103445, 72.41379310344827, 75.86206896551724, 79.3103448275862, 79.3103448275862]
## Mean Accuracy for n_folds: 7, l_rate: 0.001000, and n_epoch:2000 is 75.369%
## Scores: [65.38461538461539, 76.92307692307693, 76.92307692307693, 73.07692307692307, 61.53846153846154, 61.53846153846154, 80.76923076923077, 76.92307692307693]
## Mean Accuracy for n_folds: 8, l_rate: 0.050000, and n_epoch:100 is 71.635%
## Scores: [69.23076923076923, 61.53846153846154, 69.23076923076923, 76.92307692307693, 88.46153846153845, 65.38461538461539, 73.07692307692307, 61.53846153846154]
## Mean Accuracy for n_folds: 8, l_rate: 0.050000, and n_epoch:500 is 70.673%
## Scores: [80.76923076923077, 84.61538461538461, 84.61538461538461, 65.38461538461539, 69.23076923076923, 73.07692307692307, 76.92307692307693, 69.23076923076923]
## Mean Accuracy for n_folds: 8, l_rate: 0.050000, and n_epoch:1000 is 75.481%
## Scores: [80.76923076923077, 65.38461538461539, 80.76923076923077, 80.76923076923077, 80.76923076923077, 73.07692307692307, 84.61538461538461, 69.23076923076923]
## Mean Accuracy for n_folds: 8, l_rate: 0.050000, and n_epoch:2000 is 76.923%
## Scores: [80.76923076923077, 76.92307692307693, 53.84615384615385, 80.76923076923077, 57.692307692307686, 80.76923076923077, 65.38461538461539, 92.3076923076923]
## Mean Accuracy for n_folds: 8, l_rate: 0.010000, and n_epoch:100 is 73.558%
## Scores: [92.3076923076923, 84.61538461538461, 73.07692307692307, 76.92307692307693, 69.23076923076923, 69.23076923076923, 69.23076923076923, 69.23076923076923]
## Mean Accuracy for n_folds: 8, l_rate: 0.010000, and n_epoch:500 is 75.481%
## Scores: [73.07692307692307, 61.53846153846154, 69.23076923076923, 50.0, 80.76923076923077, 69.23076923076923, 76.92307692307693, 69.23076923076923]
## Mean Accuracy for n_folds: 8, l_rate: 0.010000, and n_epoch:1000 is 68.750%
## Scores: [73.07692307692307, 73.07692307692307, 69.23076923076923, 88.46153846153845, 61.53846153846154, 80.76923076923077, 69.23076923076923, 80.76923076923077]
## Mean Accuracy for n_folds: 8, l_rate: 0.010000, and n_epoch:2000 is 74.519%
## Scores: [84.61538461538461, 69.23076923076923, 84.61538461538461, 88.46153846153845, 80.76923076923077, 73.07692307692307, 76.92307692307693, 65.38461538461539]
## Mean Accuracy for n_folds: 8, l_rate: 0.005000, and n_epoch:100 is 77.885%
## Scores: [61.53846153846154, 80.76923076923077, 84.61538461538461, 65.38461538461539, 76.92307692307693, 69.23076923076923, 65.38461538461539, 76.92307692307693]
## Mean Accuracy for n_folds: 8, l_rate: 0.005000, and n_epoch:500 is 72.596%
## Scores: [84.61538461538461, 76.92307692307693, 76.92307692307693, 76.92307692307693, 84.61538461538461, 69.23076923076923, 69.23076923076923, 53.84615384615385]
## Mean Accuracy for n_folds: 8, l_rate: 0.005000, and n_epoch:1000 is 74.038%
## Scores: [57.692307692307686, 69.23076923076923, 65.38461538461539, 76.92307692307693, 69.23076923076923, 73.07692307692307, 65.38461538461539, 80.76923076923077]
## Mean Accuracy for n_folds: 8, l_rate: 0.005000, and n_epoch:2000 is 69.712%
## Scores: [57.692307692307686, 69.23076923076923, 73.07692307692307, 84.61538461538461, 65.38461538461539, 69.23076923076923, 61.53846153846154, 65.38461538461539]
## Mean Accuracy for n_folds: 8, l_rate: 0.001000, and n_epoch:100 is 68.269%
## Scores: [57.692307692307686, 73.07692307692307, 84.61538461538461, 76.92307692307693, 76.92307692307693, 69.23076923076923, 65.38461538461539, 84.61538461538461]
## Mean Accuracy for n_folds: 8, l_rate: 0.001000, and n_epoch:500 is 73.558%
## Scores: [84.61538461538461, 76.92307692307693, 57.692307692307686, 73.07692307692307, 73.07692307692307, 88.46153846153845, 65.38461538461539, 73.07692307692307]
## Mean Accuracy for n_folds: 8, l_rate: 0.001000, and n_epoch:1000 is 74.038%
## Scores: [76.92307692307693, 73.07692307692307, 80.76923076923077, 61.53846153846154, 73.07692307692307, 88.46153846153845, 76.92307692307693, 84.61538461538461]
## Mean Accuracy for n_folds: 8, l_rate: 0.001000, and n_epoch:2000 is 76.923%
## Scores: [82.6086956521739, 78.26086956521739, 69.56521739130434, 56.52173913043478, 78.26086956521739, 73.91304347826086, 56.52173913043478, 65.21739130434783, 86.95652173913044]
## Mean Accuracy for n_folds: 9, l_rate: 0.050000, and n_epoch:100 is 71.981%
## Scores: [78.26086956521739, 56.52173913043478, 56.52173913043478, 78.26086956521739, 78.26086956521739, 65.21739130434783, 73.91304347826086, 69.56521739130434, 78.26086956521739]
## Mean Accuracy for n_folds: 9, l_rate: 0.050000, and n_epoch:500 is 70.531%
## Scores: [82.6086956521739, 78.26086956521739, 69.56521739130434, 86.95652173913044, 52.17391304347826, 56.52173913043478, 65.21739130434783, 73.91304347826086, 91.30434782608695]
## Mean Accuracy for n_folds: 9, l_rate: 0.050000, and n_epoch:1000 is 72.947%
## Scores: [65.21739130434783, 78.26086956521739, 78.26086956521739, 65.21739130434783, 86.95652173913044, 60.86956521739131, 69.56521739130434, 60.86956521739131, 82.6086956521739]
## Mean Accuracy for n_folds: 9, l_rate: 0.050000, and n_epoch:2000 is 71.981%
## Scores: [73.91304347826086, 69.56521739130434, 69.56521739130434, 82.6086956521739, 73.91304347826086, 60.86956521739131, 86.95652173913044, 65.21739130434783, 69.56521739130434]
## Mean Accuracy for n_folds: 9, l_rate: 0.010000, and n_epoch:100 is 72.464%
## Scores: [65.21739130434783, 69.56521739130434, 73.91304347826086, 86.95652173913044, 69.56521739130434, 65.21739130434783, 78.26086956521739, 65.21739130434783, 78.26086956521739]
## Mean Accuracy for n_folds: 9, l_rate: 0.010000, and n_epoch:500 is 72.464%
## Scores: [60.86956521739131, 82.6086956521739, 82.6086956521739, 73.91304347826086, 56.52173913043478, 78.26086956521739, 82.6086956521739, 65.21739130434783, 69.56521739130434]
## Mean Accuracy for n_folds: 9, l_rate: 0.010000, and n_epoch:1000 is 72.464%
## Scores: [78.26086956521739, 69.56521739130434, 65.21739130434783, 78.26086956521739, 73.91304347826086, 82.6086956521739, 73.91304347826086, 65.21739130434783, 78.26086956521739]
## Mean Accuracy for n_folds: 9, l_rate: 0.010000, and n_epoch:2000 is 73.913%
## Scores: [39.130434782608695, 73.91304347826086, 60.86956521739131, 78.26086956521739, 69.56521739130434, 60.86956521739131, 65.21739130434783, 69.56521739130434, 65.21739130434783]
## Mean Accuracy for n_folds: 9, l_rate: 0.005000, and n_epoch:100 is 64.734%
## Scores: [73.91304347826086, 60.86956521739131, 65.21739130434783, 82.6086956521739, 69.56521739130434, 65.21739130434783, 56.52173913043478, 86.95652173913044, 82.6086956521739]
## Mean Accuracy for n_folds: 9, l_rate: 0.005000, and n_epoch:500 is 71.498%
## Scores: [91.30434782608695, 73.91304347826086, 82.6086956521739, 78.26086956521739, 69.56521739130434, 91.30434782608695, 69.56521739130434, 65.21739130434783, 60.86956521739131]
## Mean Accuracy for n_folds: 9, l_rate: 0.005000, and n_epoch:1000 is 75.845%
## Scores: [82.6086956521739, 86.95652173913044, 78.26086956521739, 78.26086956521739, 78.26086956521739, 86.95652173913044, 73.91304347826086, 69.56521739130434, 69.56521739130434]
## Mean Accuracy for n_folds: 9, l_rate: 0.005000, and n_epoch:2000 is 78.261%
## Scores: [69.56521739130434, 69.56521739130434, 60.86956521739131, 86.95652173913044, 78.26086956521739, 82.6086956521739, 73.91304347826086, 73.91304347826086, 78.26086956521739]
## Mean Accuracy for n_folds: 9, l_rate: 0.001000, and n_epoch:100 is 74.879%
## Scores: [78.26086956521739, 69.56521739130434, 82.6086956521739, 65.21739130434783, 73.91304347826086, 73.91304347826086, 78.26086956521739, 73.91304347826086, 78.26086956521739]
## Mean Accuracy for n_folds: 9, l_rate: 0.001000, and n_epoch:500 is 74.879%
## Scores: [69.56521739130434, 65.21739130434783, 69.56521739130434, 78.26086956521739, 69.56521739130434, 73.91304347826086, 69.56521739130434, 69.56521739130434, 82.6086956521739]
## Mean Accuracy for n_folds: 9, l_rate: 0.001000, and n_epoch:1000 is 71.981%
## Scores: [60.86956521739131, 95.65217391304348, 78.26086956521739, 82.6086956521739, 78.26086956521739, 73.91304347826086, 86.95652173913044, 65.21739130434783, 69.56521739130434]
## Mean Accuracy for n_folds: 9, l_rate: 0.001000, and n_epoch:2000 is 76.812%

Based on these results, the highest mean accuracy is 81.159%, obtained with n_folds: 3, l_rate: 0.005, and n_epoch: 2000; the runner-up is 78.818% with n_folds: 7, l_rate: 0.01, and n_epoch: 100.
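
Rather than scanning the printed output by eye, the same grid search can track the best configuration as it runs. A minimal sketch of that bookkeeping, reusing the functions and dataset loaded above:

# track the best hyperparameter combination while searching
best_score, best_params = 0.0, None
for n_folds in range(3, 10):
    for l_rate in [.05, .01, .005, .001]:
        for n_epoch in [100, 500, 1000, 2000]:
            scores = evaluate_algorithm(dataset, perceptron, n_folds, l_rate, n_epoch)
            mean_acc = sum(scores) / float(len(scores))
            if mean_acc > best_score:
                best_score, best_params = mean_acc, (n_folds, l_rate, n_epoch)
print('Best: %.3f%% with n_folds=%d, l_rate=%g, n_epoch=%d' % ((best_score,) + best_params))

Note that evaluate_algorithm draws random folds, so a re-run only reproduces the numbers above if the whole script is executed from seed(1).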