
Changing the dataset used with the random forest code leads to abnormal results

I am using the random forest code based on here. Here it is (skip to the end to see the question):


# Random Forest Algorithm on Sonar Dataset 
from random import seed 
from random import randrange 
from csv import reader 
from math import sqrt 


# Load a CSV file 
def load_csv(filename):
    dataset = list()
    with open(filename, 'r') as file:
        csv_reader = reader(file)
        for row in csv_reader:
            if not row:
                continue
            dataset.append(row)
    return dataset


# Convert string column to float 
def str_column_to_float(dataset, column):
    for row in dataset:
        row[column] = float(row[column].strip())


# Convert string column to integer 
def str_column_to_int(dataset, column):
    class_values = [row[column] for row in dataset]
    unique = set(class_values)
    lookup = dict()
    for i, value in enumerate(unique):
        lookup[value] = i
    for row in dataset:
        row[column] = lookup[row[column]]
    return lookup


# Split a dataset into k folds 
def cross_validation_split(dataset, n_folds):
    dataset_split = list()
    dataset_copy = list(dataset)
    fold_size = int(len(dataset) / n_folds)
    for i in range(n_folds):
        fold = list()
        while len(fold) < fold_size:
            index = randrange(len(dataset_copy))
            fold.append(dataset_copy.pop(index))
        dataset_split.append(fold)
    return dataset_split


# Calculate accuracy percentage 
def accuracy_metric(actual, predicted):
    correct = 0
    for i in range(len(actual)):
        if actual[i] == predicted[i]:
            correct += 1
    return correct / float(len(actual)) * 100.0


# Evaluate an algorithm using a cross validation split 
def evaluate_algorithm(dataset, algorithm, n_folds, *args):
    folds = cross_validation_split(dataset, n_folds)
    scores = list()
    for fold in folds:
        train_set = list(folds)
        train_set.remove(fold)
        train_set = sum(train_set, [])
        test_set = list()
        for row in fold:
            row_copy = list(row)
            test_set.append(row_copy)
            row_copy[-1] = None
        predicted = algorithm(train_set, test_set, *args)
        actual = [row[-1] for row in fold]
        accuracy = accuracy_metric(actual, predicted)
        scores.append(accuracy)
    return scores


# Split a dataset based on an attribute and an attribute value 
def test_split(index, value, dataset):
    left, right = list(), list()
    for row in dataset:
        if row[index] < value:
            left.append(row)
        else:
            right.append(row)
    return left, right


# Calculate the Gini index for a split dataset 
def gini_index(groups, classes):
    # count all samples at split point
    n_instances = float(sum([len(group) for group in groups]))
    # sum weighted Gini index for each group
    gini = 0.0
    for group in groups:
        size = float(len(group))
        # avoid divide by zero
        if size == 0:
            continue
        score = 0.0
        # score the group based on the score for each class
        for class_val in classes:
            p = [row[-1] for row in group].count(class_val) / size
            score += p * p
        # weight the group score by its relative size
        gini += (1.0 - score) * (size / n_instances)
    return gini


# Select the best split point for a dataset 
def get_split(dataset, n_features):
    class_values = list(set(row[-1] for row in dataset))
    b_index, b_value, b_score, b_groups = 999, 999, 999, None
    features = list()
    while len(features) < n_features:
        index = randrange(len(dataset[0]) - 1)
        if index not in features:
            features.append(index)
    for index in features:
        for row in dataset:
            groups = test_split(index, row[index], dataset)
            gini = gini_index(groups, class_values)
            if gini < b_score:
                b_index, b_value, b_score, b_groups = index, row[index], gini, groups
    return {'index': b_index, 'value': b_value, 'groups': b_groups}


# Create a terminal node value 
def to_terminal(group): 
    outcomes = [row[-1] for row in group] 
    return max(set(outcomes), key=outcomes.count) 


# Create child splits for a node or make terminal 
def split(node, max_depth, min_size, n_features, depth):
    left, right = node['groups']
    del node['groups']
    # check for a no split
    if not left or not right:
        node['left'] = node['right'] = to_terminal(left + right)
        return
    # check for max depth
    if depth >= max_depth:
        node['left'], node['right'] = to_terminal(left), to_terminal(right)
        return
    # process left child
    if len(left) <= min_size:
        node['left'] = to_terminal(left)
    else:
        node['left'] = get_split(left, n_features)
        split(node['left'], max_depth, min_size, n_features, depth + 1)
    # process right child
    if len(right) <= min_size:
        node['right'] = to_terminal(right)
    else:
        node['right'] = get_split(right, n_features)
        split(node['right'], max_depth, min_size, n_features, depth + 1)


# Build a decision tree 
def build_tree(train, max_depth, min_size, n_features): 
    root = get_split(train, n_features) 
    split(root, max_depth, min_size, n_features, 1) 
    return root 


# Make a prediction with a decision tree 
def predict(node, row):
    if row[node['index']] < node['value']:
        if isinstance(node['left'], dict):
            return predict(node['left'], row)
        else:
            return node['left']
    else:
        if isinstance(node['right'], dict):
            return predict(node['right'], row)
        else:
            return node['right']


# Create a random subsample from the dataset with replacement 
def subsample(dataset, ratio):
    sample = list()
    n_sample = round(len(dataset) * ratio)
    while len(sample) < n_sample:
        index = randrange(len(dataset))
        sample.append(dataset[index])
    return sample


# Make a prediction with a list of bagged trees 
def bagging_predict(trees, row): 
    predictions = [predict(tree, row) for tree in trees] 
    return max(set(predictions), key=predictions.count) 


# Random Forest Algorithm 
def random_forest(train, test, max_depth, min_size, sample_size, n_trees, n_features):
    trees = list()
    for i in range(n_trees):
        sample = subsample(train, sample_size)
        tree = build_tree(sample, max_depth, min_size, n_features)
        trees.append(tree)
    predictions = [bagging_predict(trees, row) for row in test]
    return predictions

To generalize it so that it can be run on any dataset, I wrote the following:

import pandas as pd 
file_path = 'http://archive.ics.uci.edu/ml/machine-learning-databases/undocumented/connectionist-bench/sonar/sonar.all-data'
dataset2 = pd.read_csv(file_path, header=None, sep=',')
v = dataset2.values 

f = pd.factorize(v.ravel())[0].reshape(v.shape) 

dataset1 = pd.DataFrame(f) 
df = dataset1.astype('str') 

dataset = df.values.tolist() 
target_index = 60 
for i in range(0, len(dataset[0])):
    if i != target_index:
        str_column_to_float(dataset, i)
# convert class column to integers 
str_column_to_int(dataset, target_index) 
n_folds = 5 
max_depth = 10 
min_size = 1 
sample_size = 1.0 
n_features = int(sqrt(len(dataset[0]) - 1)) 


for n_trees in [5]: 
    scores = evaluate_algorithm(dataset, random_forest, n_folds, max_depth, min_size, sample_size, n_trees, n_features) 
    print('Trees: %d' % n_trees) 
    print('Scores: %s' % scores) 
    print('Mean Accuracy: %.3f%%' % (sum(scores)/float(len(scores)))) 

The code above works fine for the SONAR dataset. Its structure is:

0.0200,0.0371,0.0428,0.0207,0.0954,0.0986,0.1539,0.1601,0.3109,0.2111,0.1609,0.1582,0.2238,0.0645,0.0660,0.2273,0.3100,0.2999,0.5078,0.4797,0.5783,0.5071,0.4328,0.5550,0.6711,0.6415,0.7104,0.8080,0.6791,0.3857,0.1307,0.2604,0.5121,0.7547,0.8537,0.8507,0.6692,0.6097,0.4943,0.2744,0.0510,0.2834,0.2825,0.4256,0.2641,0.1386,0.1051,0.1343,0.0383,0.0324,0.0232,0.0027,0.0065,0.0159,0.0072,0.0167,0.0180,0.0084,0.0090,0.0032,R 
0.0453,0.0523,0.0843,0.0689,0.1183,0.2583,0.2156,0.3481,0.3337,0.2872,0.4918,0.6552,0.6919,0.7797,0.7464,0.9444,1.0000,0.8874,0.8024,0.7818,0.5212,0.4052,0.3957,0.3914,0.3250,0.3200,0.3271,0.2767,0.4423,0.2028,0.3788,0.2947,0.1984,0.2341,0.1306,0.4182,0.3835,0.1057,0.1840,0.1970,0.1674,0.0583,0.1401,0.1628,0.0621,0.0203,0.0530,0.0742,0.0409,0.0061,0.0125,0.0084,0.0089,0.0048,0.0094,0.0191,0.0140,0.0049,0.0052,0.0044,R 

These are the results (which look OK):

Trees: 5 
Scores: [100.0, 95.1219512195122, 100.0, 97.5609756097561, 100.0] 
Mean Accuracy: 98.537% 

When I change the data to the breast-cancer-wisconsin dataset:

842302,M,17.99,10.38,122.8,1001,0.1184,0.2776,0.3001,0.1471,0.2419,0.07871,1.095,0.9053,8.589,153.4,0.006399,0.04904,0.05373,0.01587,0.03003,0.006193,25.38,17.33,184.6,2019,0.1622,0.6656,0.7119,0.2654,0.4601,0.1189 
842517,M,20.57,17.77,132.9,1326,0.08474,0.07864,0.0869,0.07017,0.1812,0.05667,0.5435,0.7339,3.398,74.08,0.005225,0.01308,0.0186,0.0134,0.01389,0.003532,24.99,23.41,158.8,1956,0.1238,0.1866,0.2416,0.186,0.275,0.08902 

I change the relevant code to:

import pandas as pd 
file_path = 'https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data'
dataset2 = pd.read_csv(file_path, header=None, sep=',')
v = dataset2.values 

f = pd.factorize(v.ravel())[0].reshape(v.shape) 

dataset1 = pd.DataFrame(f) 
df = dataset1.astype('str') 

dataset = df.values.tolist() 
target_index = 1 ## <---- 
for i in range(0, len(dataset[0])):
    if i != target_index:
        str_column_to_float(dataset, i)
# convert class column to integers 
str_column_to_int(dataset, target_index) 
n_folds = 5 
max_depth = 10 
min_size = 1 
sample_size = 1.0 
n_features = int(sqrt(len(dataset[0]) - 1)) 


for n_trees in [5]: 
    scores = evaluate_algorithm(dataset, random_forest, n_folds, max_depth, min_size, sample_size, n_trees, n_features) 
    print('Trees: %d' % n_trees) 
    print('Scores: %s' % scores) 
    print('Mean Accuracy: %.3f%%' % (sum(scores)/float(len(scores)))) 

It runs for a very long time and the results look wrong:

Trees: 5 
Scores: [0.0, 0.0, 0.0, 0.8849557522123894, 0.0] 
Mean Accuracy: 0.177% 

Have you tried repeated runs? The word **random** in the name of the *Random Forests* method is there for a reason... How about trying a few runs? – sophros

Answer


AFAIK, the random forest method is very data dependent and is sensitive both to the random seed and to noise in the data. Consequently, switching to a dataset with different noise characteristics and class separability can produce poor results even if the method worked perfectly on another dataset.

There is also a pure-luck factor in the random part of the method... Therefore, any result you obtain should be repeated for validation. It may simply be bad luck on this particular run, although your results suggest that the method is just poorly suited to the dataset.
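
For instance, a minimal sketch (assuming the dataset, the hyper-parameters and the functions defined in the question are already in scope; the seed values are arbitrary) that repeats the whole cross-validated evaluation under several seeds and reports the spread of the mean accuracy:

# Repeat the evaluation under several random seeds to separate an unlucky
# draw from a genuinely poor fit (illustrative sketch only)
n_trees = 5
mean_accuracies = list()
for run_seed in [1, 7, 42, 123, 2017]:
    seed(run_seed)  # re-seed the RNG used by randrange() throughout the code
    scores = evaluate_algorithm(dataset, random_forest, n_folds, max_depth, min_size, sample_size, n_trees, n_features)
    mean_accuracies.append(sum(scores) / float(len(scores)))
print('Per-seed mean accuracy: %s' % mean_accuracies)
print('Spread: min %.3f%%, max %.3f%%' % (min(mean_accuracies), max(mean_accuracies)))

If the mean accuracy stays near zero for every seed, randomness alone is not the explanation.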

If you really need to dive into the topic of random forests, I would suggest the comprehensive (freely available) overview Understanding Random Forests: From Theory to Practice by Gilles Louppe.

There is also an interesting discussion of the method's sensitivity to outliers on the CrossValidated forum.
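
As a toy illustration of that sensitivity (purely hypothetical two-class data, reusing only the build_tree and predict functions from the question), a single mislabelled extreme value is enough to flip a tree's prediction for large feature values:

# Hypothetical data: one feature, two classes, plus one mislabelled outlier
clean = [[1.0, 0], [1.1, 0], [1.2, 0], [2.0, 1], [2.1, 1], [2.2, 1]]
noisy = clean + [[50.0, 0]]  # extreme value carrying the "wrong" class

# max_depth=3, min_size=1, n_features=1 so the single feature is always used
tree_clean = build_tree(clean, 3, 1, 1)
tree_noisy = build_tree(noisy, 3, 1, 1)

print(predict(tree_clean, [60.0, None]))  # 1 -- follows the overall pattern
print(predict(tree_noisy, [60.0, None]))  # 0 -- pulled over by the outlier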


Thanks @sophros. As the breast-cancer-wisconsin results show, 4 out of 5 folds give 0% accuracy while the fifth gives 88.5%; something seems fishy! – Avi