{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Random Forest"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"from random import seed\n",
"from random import randrange\n",
"from csv import reader\n",
"from math import sqrt\n",
"import json\n"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"# Load a CSV file: defines the function that reads the CSV and builds the dataset\n",
"def load_csv(filename):\n",
"\tdataset = list()\n",
"\twith open(filename, 'r') as file:\n",
"\t\tcsv_reader = reader(file)\n",
"\t\tfor row in csv_reader:\n",
"\t\t\tif not row: # skip empty rows\n",
"\t\t\t\tcontinue\n",
"\t\t\tdataset.append(row)\n",
"\treturn dataset\n"
]
},
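{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal sketch of `load_csv` in action. The file name `toy.csv` and its contents are made up here purely for illustration; the real data is loaded further down."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Write a tiny hypothetical CSV to disk, then read it back with load_csv\n",
"with open('toy.csv', 'w') as f:\n",
"\tf.write('1.0,2.0,rock\\n3.0,4.0,mine\\n')\n",
"print(load_csv('toy.csv')) # [['1.0', '2.0', 'rock'], ['3.0', '4.0', 'mine']] -- all values are still strings\n"
]
},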
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"# Convert string column to float\n",
"def str_column_to_float(dataset, column):\n",
"\tfor row in dataset:\n",
"\t\trow[column] = float(row[column].strip())\n",
" \n",
"# Convert string column to integer\n",
"def str_column_to_int(dataset, column):\n",
"\tclass_values = [row[column] for row in dataset] # extract the values of the column (here the classes of the dataset, mines and rocks)\n",
"\tunique = set(class_values) # collect the unique class values into a set: a collection without duplicates\n",
"\tlookup = dict() # create a dictionary\n",
"\tfor i, value in enumerate(unique): # loop through the set / enumerate gives you a tuple with an index number and a value / a common way to get indexes from a list\n",
"\t\tlookup[value] = i # the key of the dictionary is the class value, mine or rock; the value is a number, 0 or 1\n",
"\tfor row in dataset: # loop through the rows of the dataset\n",
"\t\trow[column] = lookup[row[column]] # replace the column value, rock or mine, with its index value, 0 or 1\n",
"\treturn lookup # return the lookup table\n"
]
},
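{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick check of `str_column_to_int` on a made-up two-row dataset (the rows and labels are illustrative only). Note that it mutates the dataset in place and returns the lookup table."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"toy = [['1.0', '2.0', 'rock'], ['3.0', '4.0', 'mine']]\n",
"lookup = str_column_to_int(toy, 2) # replace the class strings in column 2 by integers\n",
"print(lookup) # e.g. {'rock': 0, 'mine': 1} -- the mapping depends on set iteration order\n",
"print(toy) # the class column now holds 0/1 instead of strings\n"
]
},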
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"# Split a dataset into k folds\n",
"def cross_validation_split(dataset, n_folds):\n",
"\tdataset_split = list() # create a list\n",
"\tdataset_copy = list(dataset) # create a copy of the dataset as a list. You could also use the copy library\n",
"\tfold_size = int(len(dataset) / n_folds) # the size of a fold is the length of the dataset divided by the number of folds\n",
"\tprint(\"fold size:\")\n",
"\tprint(fold_size)\n",
"\tfor i in range(n_folds): # loop over the folds: range generates the numbers from 0 up to n_folds\n",
"\t\tfold = list() # create a list\n",
"\t\twhile len(fold) < fold_size: # as long as the length of the list is smaller than the defined fold size\n",
"\t\t\tindex = randrange(len(dataset_copy)) # draw a random integer between 0 and the remaining length of the dataset copy and store it in index\n",
"\t\t\tfold.append(dataset_copy.pop(index)) # append the observation at that index and remove it from the copy\n",
"\t\tdataset_split.append(fold) # append the fold to the list dataset_split\n",
"\t\t#print(\"______________\")\n",
"\t\t#print(\"dataset split:\")\n",
"\t\t#print(dataset_split)\n",
"\t\t#print(\"______________\")\n",
"\treturn dataset_split # return dataset_split, a list of folds\n"
]
},
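{
"cell_type": "markdown",
"metadata": {},
"source": [
"A small sketch of `cross_validation_split` on six made-up rows: with `n_folds=3` each fold receives two rows, drawn at random without replacement."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"seed(1) # fix the random draws so the sketch is reproducible\n",
"toy = [[0], [1], [2], [3], [4], [5]]\n",
"print(cross_validation_split(toy, 3)) # three disjoint folds of two rows each\n"
]
},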
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"# Calculate accuracy percentage\n",
"def accuracy_metric(actual, predicted):\n",
"\tcorrect = 0 # create a counter of correct guesses\n",
"\tfor i in range(len(actual)): # loop up to the length of the actual list\n",
"\t\tif actual[i] == predicted[i]: # compare the actual vs the predicted class\n",
"\t\t\tcorrect += 1 # if they match, add one to the counter\n",
"\treturn correct / float(len(actual)) * 100.0 # give a percentage by dividing the correct guesses by the number of actual classes and multiplying by a hundred\n"
]
},
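{
"cell_type": "markdown",
"metadata": {},
"source": [
"A worked check of `accuracy_metric` on made-up label lists: 3 matches out of 4 gives 75%."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(accuracy_metric([0, 1, 1, 0], [0, 1, 0, 0])) # 75.0\n"
]
},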
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"# Evaluate an algorithm using a cross validation split\n",
"def evaluate_algorithm(dataset, algorithm, n_folds, *args):\n",
"\tfolds = cross_validation_split(dataset, n_folds) # get the list of folds\n",
"\tscores = list() # create a list called scores\n",
"\tfor fold in folds: # loop through the folds\n",
"\t\ttrain_set = list(folds) # create a copy of the list of folds\n",
"\t\ttrain_set.remove(fold) # remove the current fold, which is held out for testing\n",
"\t\ttrain_set = sum(train_set, []) # concatenate all folds, a list of lists, into one list. Can also be done with itertools.chain\n",
"\t\ttest_set = list() # create another list\n",
"\t\tfor row in fold: # iterate through the held-out fold\n",
"\t\t\trow_copy = list(row) # create a copy of the row\n",
"\t\t\ttest_set.append(row_copy) # append the copy to the test_set\n",
"\t\t\trow_copy[-1] = None # set the classification to None: the last column is hidden from the algorithm\n",
"\t\tpredicted = algorithm(train_set, test_set, *args) # the algorithm takes the train and test sets as arguments and returns predictions for the test set\n",
"\t\tactual = [row[-1] for row in fold] # list comprehension: the actual classes from the fold\n",
"\t\taccuracy = accuracy_metric(actual, predicted) # compare the actual vs the predicted classes to measure the accuracy of the prediction\n",
"\t\tscores.append(accuracy) # append the accuracy to the list of scores\n",
"\treturn scores\n"
]
},
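{
"cell_type": "markdown",
"metadata": {},
"source": [
"`evaluate_algorithm` works with any function that maps a train set and a test set to predictions. As a sketch, the hypothetical `zero_rule` baseline below (not part of the original notebook) always predicts the most frequent class of the training set; the random forest later has to beat this."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def zero_rule(train, test): # hypothetical baseline for comparison\n",
"\tclasses = [row[-1] for row in train]\n",
"\tmost_frequent = max(set(classes), key=classes.count) # majority class of the training set\n",
"\treturn [most_frequent for _ in test] # predict it for every test row\n",
"# once the dataset is loaded below, it could be scored with:\n",
"# evaluate_algorithm(dataset, zero_rule, n_folds)\n"
]
},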
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"# Split a dataset based on an attribute/feature and an attribute/feature value\n",
"def test_split(index, value, dataset):\n",
"\tleft, right = list(), list() # create two lists, one for each side\n",
"\tfor row in dataset: # iterate through each row of the dataset\n",
"\t\tif row[index] < value: # if the feature value of the current row is below the given feature value\n",
"\t\t\tleft.append(row) # append it to the left list\n",
"\t\telse:\n",
"\t\t\tright.append(row) # append it to the right list\n",
"\treturn left, right # return the two lists\n",
" \n",
"# Calculate the Gini index for a split dataset\n",
"def gini_index(groups, classes):\n",
"\t# count all samples at split point\n",
"\tn_instances = float(sum([len(group) for group in groups])) # count the total number of instances, as a float for the divisions\n",
"\t# sum weighted Gini index for each group\n",
"\tgini = 0.0 # gini variable\n",
"\tfor group in groups: # for each of the groups\n",
"\t\tsize = float(len(group)) # number of instances in the group\n",
"\t\t# avoid divide by zero\n",
"\t\tif size == 0:\n",
"\t\t\tcontinue\n",
"\t\tscore = 0.0\n",
"\t\t# score the group based on the score for each class\n",
"\t\tfor class_val in classes:\n",
"\t\t\tp = [row[-1] for row in group].count(class_val) / size # count the instances of the current class in the group and divide by the size of the group\n",
"\t\t\tscore += p * p # squaring the proportion amplifies the difference between pure and mixed groups\n",
"\t\t# weight the group score by its relative size\n",
"\t\tgini += (1.0 - score) * (size / n_instances) # subtract the score from 1 and multiply it by the relative size of the group compared to the dataset\n",
"\treturn gini\n",
" \n",
"# Select the best split point for a dataset\n",
"def get_split(dataset, n_features):\n",
"\tclass_values = list(set(row[-1] for row in dataset)) # create a list of the distinct class values, here encoded as 1 and 0. We already did this before\n",
"\tb_index, b_value, b_score, b_groups = 999, 999, 999, None # initialise the best-split variables with placeholder values\n",
"\tfeatures = list() # create a list\n",
"\twhile len(features) < n_features: # as long as features is smaller than the desired number of features, n_features\n",
"\t\tindex = randrange(len(dataset[0])-1) # draw a random number between 0 and the number of columns minus 1, excluding the class column\n",
"\t\tif index not in features: # if the column index is not already among the features\n",
"\t\t\tfeatures.append(index) # append the column index\n",
"\tfor index in features: # for each column index in features\n",
"\t\tfor row in dataset: # for each row of the dataset\n",
"\t\t\tgroups = test_split(index, row[index], dataset) # get the two lists. Computationally heavy: why not sort the values first?\n",
"\t\t\tgini = gini_index(groups, class_values) # get the gini value\n",
"\t\t\tif gini < b_score: # if the gini value is smaller than b_score (b for best), which is always true for the first candidate. Test against the best option so far\n",
"\t\t\t\tb_index, b_value, b_score, b_groups = index, row[index], gini, groups # update the values for the best option\n",
"\treturn {'index':b_index, 'value':b_value, 'groups':b_groups} # return the best option in the form of a dictionary\n",
" \n",
"# Create a terminal node value\n",
"# Returns the most popular value in the group\n",
"def to_terminal(group):\n",
"\toutcomes = [row[-1] for row in group] # take the class of each group element and put it in a list\n",
"\treturn max(set(outcomes), key=outcomes.count) # return the class that appears the most: set collects the distinct classes, and the key counts the occurrences of each class. SPOOKY AND DENSE\n",
" \n",
"# Create child splits for a node or make a terminal (node)\n",
"def split(node, max_depth, min_size, n_features, depth):\n",
"\tleft, right = node['groups'] # get the groups from the node dictionary using the key groups\n",
"\tdel(node['groups']) # delete the dictionary entry with key groups\n",
"\t# check for a no split\n",
"\tif not left or not right: # if one of the groups is empty\n",
"\t\tnode['left'] = node['right'] = to_terminal(left + right) # both children become the same terminal node\n",
"\t\treturn\n",
"\t# check for max depth\n",
"\tif depth >= max_depth: # check whether the current depth has reached the maximum depth of the tree\n",
"\t\tnode['left'], node['right'] = to_terminal(left), to_terminal(right) # if yes, both children are terminal nodes\n",
"\t\treturn\n",
"\t# process left child\n",
"\tif len(left) <= min_size: # if the group is smaller than or equal to the minimum size for a group\n",
"\t\tnode['left'] = to_terminal(left) # the left child is a terminal node\n",
"\telse:\n",
"\t\tnode['left'] = get_split(left, n_features) # create another split, another node from which to separate a subset of the dataset into two groups\n",
"\t\tsplit(node['left'], max_depth, min_size, n_features, depth+1) # recursion -> the function calls itself\n",
"\t# process right child\n",
"\tif len(right) <= min_size:\n",
"\t\tnode['right'] = to_terminal(right)\n",
"\telse:\n",
"\t\tnode['right'] = get_split(right, n_features)\n",
"\t\tsplit(node['right'], max_depth, min_size, n_features, depth+1)\n",
" \n",
"# Build a decision tree\n",
"def build_tree(train, max_depth, min_size, n_features):\n",
"\troot = get_split(train, n_features) # get the index, value and groups for the first split. It's a dictionary\n",
"\tsplit(root, max_depth, min_size, n_features, 1) # depth starts at one\n",
" # root is a Node\n",
" # Node: { index: int, value: float, left: Node|TerminalNode, right: Node|TerminalNode }\n",
" # TerminalNode: { index: int, value: float, left: int(class), right: int(class) }\n",
"\treturn root # dictionary with index, value, left, right\n",
" \n",
"# Make a prediction with a decision tree\n",
"def predict(node, row):\n",
"\tif row[node['index']] < node['value']: # if the feature value of the row is smaller than the feature value of the node\n",
"\t\tif isinstance(node['left'], dict): # is it a node or a terminal node (children are not nodes)\n",
"\t\t\treturn predict(node['left'], row) # recursion if a node: the function calls itself on the following left node\n",
"\t\telse:\n",
"\t\t\treturn node['left'] # result if a terminal node\n",
"\telse:\n",
"\t\tif isinstance(node['right'], dict): # is it a node or a terminal node (children are not nodes)\n",
"\t\t\treturn predict(node['right'], row) # recursion if a node: the function calls itself on the following right node\n",
"\t\telse:\n",
"\t\t\treturn node['right'] # result if a terminal node\n",
" \n",
"# Create a random subsample from the dataset with replacement\n",
"def subsample(dataset, ratio):\n",
"\tsample = list() # create a list\n",
"\tn_sample = round(len(dataset) * ratio) # the sample size is the length of the dataset multiplied by the ratio: here 1.0\n",
"\twhile len(sample) < n_sample: # loop until the sample reaches the length n_sample\n",
"\t\tindex = randrange(len(dataset)) # take a random number from 0 up to the length of the dataset\n",
"\t\tsample.append(dataset[index]) # append the row with this index; the same row can be drawn again\n",
"\treturn sample # return the list of sub-samples\n",
" \n",
"# Make a prediction with a list of bagged trees\n",
"def bagging_predict(trees, row):\n",
"\tpredictions = [predict(tree, row) for tree in trees] # run the prediction in each tree; this gives a list of predicted classes/votes. THE TREES ARE VOTING.\n",
"\treturn max(set(predictions), key=predictions.count) # count the class with the maximum votes and return it as the prediction\n",
" \n",
"# Random Forest Algorithm\n",
"def random_forest(train, test, max_depth, min_size, sample_size, n_trees, n_features):\n",
"\ttrees = list() # create a list of trees\n",
"\tfor i in range(n_trees):\n",
"\t\tsample = subsample(train, sample_size) # create a subsample\n",
"\t\ttree = build_tree(sample, max_depth, min_size, n_features) # build the tree\n",
"\t\ttrees.append(tree) # append it to the list\n",
"\twith open(\"model.json\", \"w\") as out_file:\n",
"\t\tjson.dump(trees, out_file, indent = 6)\n",
"\tpredictions = [bagging_predict(trees, row) for row in test] # test with the test data, running the prediction on every row. THE FOREST VOTES ON EVERY ROW\n",
"\treturn(predictions) # return the predicted class of each of the rows as a list. THE PREDICTIONS OF THE FOREST\n"
]
},
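{
"cell_type": "markdown",
"metadata": {},
"source": [
"For a candidate split into groups $g$, the Gini index computed above is $$\\text{gini} = \\sum_{g} \\Big(1 - \\sum_{k} p_{g,k}^2\\Big) \\cdot \\frac{|g|}{N},$$ where $p_{g,k}$ is the proportion of class $k$ in group $g$ and $N$ the total number of rows. The sketch below checks the two extreme cases on made-up groups, and runs `predict` on a hand-written one-node tree; all values are illustrative."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Worked Gini checks on made-up groups (class labels 0/1)\n",
"pure = [[[0], [0]], [[1], [1]]] # perfect split: each group holds one class\n",
"mixed = [[[0], [1]], [[0], [1]]] # worst split: both groups are half/half\n",
"print(gini_index(pure, [0, 1])) # 0.0\n",
"print(gini_index(mixed, [0, 1])) # 0.5\n",
"\n",
"# predict on a hand-built one-node tree: rows with feature 0 below 2.5 go left\n",
"toy_tree = {'index': 0, 'value': 2.5, 'left': 0, 'right': 1}\n",
"print(predict(toy_tree, [1.0])) # 0\n",
"print(predict(toy_tree, [3.0])) # 1\n"
]
},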
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"from random import random"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"# Test the random forest algorithm\n",
"seed(2) # put the random generator in a certain state -> makes it deterministic; otherwise Python seeds it from the computer's clock\n",
"#print(random())\n",
"#print(random())\n",
"#seed(2)\n",
"#print(random())\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
"# load and prepare data\n",
"filename = 'iris.csv'\n",
"\n",
"dataset = load_csv(filename)\n",
"#print(dataset)\n"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"# convert string attributes to integers\n",
"for i in range(0, len(dataset[0])-1):\n",
"\tstr_column_to_float(dataset, i)\n",
"# convert class column to integers\n",
"#str_column_to_int(dataset, len(dataset[0])-1) # this function loops through the rows and transforms the words mine and rock into 1 and 0; left commented out here, so the iris class stays a string\n",
"#print(dataset)"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [],
"source": [
"# evaluate algorithm\n",
"n_folds = 5 # the data is randomly split into 5 folds; one is kept for testing, the 4 others are used for training\n",
"max_depth = 10 # maximum depth of each tree\n",
"min_size = 1 # minimum number of rows in a group before it becomes a terminal node\n",
"sample_size = 1.0 # fixed; a value smaller than one reduces the size of each subsample\n"
]
},
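{
"cell_type": "markdown",
"metadata": {},
"source": [
"Assuming `iris.csv` is the standard 150-row, 4-feature iris dataset, these settings work out as: fold size $= \\lfloor 150 / 5 \\rfloor = 30$ rows per fold, and feature subset size $= \\lfloor \\sqrt{4} \\rfloor = 2$ features per split, which matches the `fold size: 30` printed below."
]
},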
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"fold size:\n",
"30\n",
"Trees: 1\n",
"Scores: [100.0, 93.33333333333333, 90.0, 100.0, 93.33333333333333]\n",
"Mean Accuracy: 95.333%\n",
"fold size:\n",
"30\n",
"Trees: 5\n",
"Scores: [96.66666666666667, 96.66666666666667, 100.0, 93.33333333333333, 93.33333333333333]\n",
"Mean Accuracy: 96.000%\n",
"fold size:\n",
"30\n",
"Trees: 10\n",
"Scores: [90.0, 96.66666666666667, 96.66666666666667, 86.66666666666667, 100.0]\n",
"Mean Accuracy: 94.000%\n"
]
}
],
"source": [
"n_features = int(sqrt(len(dataset[0])-1)) # the size of the feature subset considered at each split, close to the square root of the total number of features\n",
"for n_trees in [1, 5, 10]: # loop three times, with n_trees equal to one, five and ten successively\n",
"\tscores = evaluate_algorithm(dataset, random_forest, n_folds, max_depth, min_size, sample_size, n_trees, n_features)\n",
"\tprint('Trees: %d' % n_trees)\n",
"\tprint('Scores: %s' % scores)\n",
"\tprint('Mean Accuracy: %.3f%%' % (sum(scores)/float(len(scores))))"
]
},
{
"cell_type": "code",
"execution_count": 19,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Iris Versicolor\n"
]
}
],
"source": [
"# reload the forest saved by random_forest and let it vote on one row\n",
"with open(\"model.json\", \"r\") as in_file:\n",
"\ttrees = json.load(in_file)\n",
"\tprediction = bagging_predict(trees, dataset[55])\n",
"\tprint(prediction)"
]
},
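{
"cell_type": "markdown",
"metadata": {},
"source": [
"A small follow-up sketch: run the reloaded forest over a handful of rows and compare its votes with the stored labels. The row indices are arbitrary choices for illustration."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"with open(\"model.json\", \"r\") as in_file:\n",
"\ttrees = json.load(in_file)\n",
"for i in [0, 55, 120]: # arbitrary sample rows\n",
"\tprint(dataset[i][-1], '->', bagging_predict(trees, dataset[i])) # actual label -> forest vote\n"
]
},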
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.8"
}
},
"nbformat": 4,
"nbformat_minor": 2
}