Compare commits
No commits in common. "76c5f61ddaded54305c0538f842cef67ed40ae89" and "8850f957ae8e70a8dec403678427009bc0613b23" have entirely different histories.
76c5f61dda ... 8850f957ae
@@ -1,14 +1,14 @@
# models
from enum import Enum
from sklearn import tree
from sklearn import metrics
from sklearn import preprocessing
from sklearn import neighbors
from sklearn import ensemble
from sklearn import svm

# other
from enum import Enum
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
import time
import random
import csv
import plots

@@ -49,68 +49,58 @@ with open(PATH, 'r') as file:
normalized = preprocessing.normalize(data, axis=0, norm='max')
norm = list(normalized.tolist())

steps = np.linspace(1e-4, 1, 20, dtype=np.float64)
steps = np.linspace(0.1, 1.0, 10, dtype=np.float64)

print("Step \t seconds/step")
for step in steps:
actual = []
predicted = []
time_start = time.time()

for j in range(3):
for i in range(len(norm)):
temp_data = norm.pop(i)
temp_label = labels.pop(i)
for i in range(len(norm)):
temp_data = norm.pop(i)
temp_label = labels.pop(i)

# model = tree.DecisionTreeClassifier(
# class_weight=None,
# min_samples_leaf=2,
# max_depth=None, # < 5 is worse, None good too
# random_state=False, # No change
# criterion='gini', # MCC + 0.1
# splitter='best',
# ccp_alpha=0 # Pruning: Keep this 0
# )
# model = ensemble.RandomForestClassifier(
# n_estimators=20, # higher is better, but slower (def: 100)
# criterion='gini', # gini best
# )
# model = ensemble.ExtraTreesClassifier(
# n_estimators=step # higher is better, but slower (def: 100)
# )
# model = neighbors.KNeighborsClassifier(
# algorithm='auto',
# leaf_size=2,
# n_neighbors=step,
# )
# model = ensemble.BaggingClassifier(
# n_estimators=5,
# max_samples=.5,
# max_features=.5,
# bootstrap=False
# )
# model = svm.SVC(
# C = 0.8,
# kernel = "poly",
# degree = 5,
# coef0 = 6,
# probability = False,
# break_ties=True,
# decision_function_shape = 'ovr'
# )
model = model.fit(norm, labels)
result = model.predict([temp_data])
# model = tree.DecisionTreeClassifier(
# class_weight=None,
# min_samples_leaf=2,
# max_depth=None, # < 5 is worse, None good too
# random_state=False, # No change
# criterion='gini', # MCC + 0.1
# splitter='best',
# ccp_alpha=0 # Pruning: Keep this 0
# )
# model = ensemble.RandomForestClassifier(
# n_estimators=20, # higher is better, but slower (def: 100)
# criterion='gini', # gini best
# )
# model = ensemble.ExtraTreesClassifier(
# n_estimators=150 # higher is better, but slower (def: 100)
# )
# model = neighbors.KNeighborsClassifier(
# algorithm='auto',
# leaf_size=2,
# n_neighbors=step,
# )
model = ensemble.BaggingClassifier(
n_estimators=5,
max_samples=.5,
max_features=.5,
bootstrap=False
)
# model = svm.SVC(decision_function_shape='ovr'
# )
model = model.fit(norm, labels)
result = model.predict([temp_data])

norm.append(temp_data)
labels.append(temp_label)
norm.append(temp_data)
labels.append(temp_label)

actual.append(temp_label)
predicted.append(result[0])
actual.append(temp_label)
predicted.append(result[0])

actual_list.append(actual)
predicted_list.append(predicted)

print("%.4f"%step, "\t", "%.2f"%(time.time()-time_start))
print(step)

plots.plotMetrics(actual_list, predicted_list)
plots.plotConfusion(actual_list[0], predicted_list[0])

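Both revisions drive the same evaluation scheme: sweep a parameter grid (`steps` changes from np.linspace(1e-4, 1, 20) to np.linspace(0.1, 1.0, 10), i.e. ten evenly spaced values from 0.1 to 1.0) and, for each step, run a leave-one-out loop that holds one sample out, fits the classifier on the rest, predicts the held-out sample, and puts it back. A minimal, non-mutating sketch of that loop, assuming list-of-lists features `norm` and class labels `labels` as in this script; the BaggingClassifier settings are the ones enabled in the new revision, and the repo's own plots.plotMetrics / plots.plotConfusion helpers are not reproduced here:

from sklearn import ensemble

def leave_one_out(features, labels):
    """Hold each sample out once, fit on the rest, and predict the held-out sample."""
    actual, predicted = [], []
    for i in range(len(features)):
        train_x = features[:i] + features[i + 1:]   # everything except sample i
        train_y = labels[:i] + labels[i + 1:]
        model = ensemble.BaggingClassifier(
            n_estimators=5,
            max_samples=.5,
            max_features=.5,
            bootstrap=False,
        )
        model.fit(train_x, train_y)                  # train on the remaining samples
        actual.append(labels[i])
        predicted.append(model.predict([features[i]])[0])
    return actual, predicted

The pop/append variant in the diff re-appends each held-out sample at the end of the list, so indices shift between iterations; slicing around i sidesteps that.
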
@@ -94,7 +94,6 @@ detector = cv2.aruco.ArucoDetector(dictionary, detector_params)

images_converted = 0
images_skipped = 0
names_skipped = []

### IMAGE CONVERSIE ###
for folder in os.listdir(input_directory):
@@ -148,12 +147,16 @@ for folder in os.listdir(input_directory):
if VERBOSE:
print("IDs detected:\n", ids)

if ids is None or len(ids) != 4:
if ids is None:
print("Skipping: ", filename)
print("=============================================")
names_skipped.append(filename)
images_skipped += 1
continue
if len(ids) != 4:
print("Skipping: ", filename)
print("=============================================")
images_skipped += 1
continue

if VERBOSE:
print("%d markers gedetecteerd" %len(ids))
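The reworked check splits the single `ids is None or len(ids) != 4` test in two, so images with no detections at all get their filename recorded in names_skipped, while images with the wrong marker count are only counted. A minimal sketch of that detection and skip logic, assuming OpenCV 4.7+ with the cv2.aruco.ArucoDetector API used above; the dictionary choice (DICT_4X4_50) and the helper name detect_four_markers are placeholders, not taken from this repo:

import cv2

dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_50)
detector = cv2.aruco.ArucoDetector(dictionary, cv2.aruco.DetectorParameters())

def detect_four_markers(image, filename, names_skipped):
    """Return (corners, ids) when exactly four markers are found, else None."""
    corners, ids, _rejected = detector.detectMarkers(image)
    if ids is None:                  # nothing detected: log the filename
        names_skipped.append(filename)
        return None
    if len(ids) != 4:                # partial detection: skip without logging
        return None
    return corners, ids
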
@@ -253,12 +256,5 @@ for folder in os.listdir(input_directory):
if VERBOSE:
print("%d van de %d succesvol"
%(images_converted, (images_converted+images_skipped)))

if images_skipped != 0:
print("")
with open(os.path.join(input_directory, "skipped.txt"), 'w') as file:
for name in names_skipped:
file.write(name)
file.write("\n")

cv2.destroyAllWindows()
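The skipped-image report shown above writes one name per line to skipped.txt; a compact equivalent of that block (same os.path.join target, joining the names in a single write) could look like:

import os

def write_skipped(input_directory, names_skipped):
    """Dump the skipped filenames, one per line, next to the input images."""
    with open(os.path.join(input_directory, "skipped.txt"), 'w') as file:
        file.write("\n".join(names_skipped) + "\n")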