Commit 8ce122ef authored by thibault.capt

add for iris

parent 1127a7b3
iris.py 0 → 100644
import pandas as pd
import numpy as np
from collections import Counter


def manhattan_distance(x1: np.ndarray, x2: np.ndarray) -> float:
    """Return the Manhattan (L1) distance between two vectors of equal shape."""
    if x1.shape != x2.shape:
        raise ValueError("x1 and x2 must have the same shape")
    return float(np.sum(np.abs(x1 - x2)))
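

# Optional sketch, not part of the original commit: classic K-Means minimises the
# squared Euclidean distance in the assignment step, whereas the code below uses the
# Manhattan distance. A hypothetical drop-in alternative would look like this:
def euclidean_distance(x1: np.ndarray, x2: np.ndarray) -> float:
    """Return the Euclidean (L2) distance between two vectors of equal shape."""
    if x1.shape != x2.shape:
        raise ValueError("x1 and x2 must have the same shape")
    return float(np.sqrt(np.sum((x1 - x2) ** 2)))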
if __name__ == '__main__':
    # Load the Iris data from a CSV file
    data = pd.read_csv('Data/iris.csv', header=None)

    # Split the features (X) from the class labels (y)
    X = data.iloc[:, :-1].values.astype(float)
    labels = data.iloc[:, -1].values

    # Number of clusters (k); the Iris data set has three actual classes
    k = 3

    # Random initialisation of the centroids from the data points
    centroids = X[np.random.choice(X.shape[0], k, replace=False)]

    # Maximum number of iterations
    max_iter = 100

    # K-Means algorithm (assignment with the Manhattan distance, mean update)
    for i in range(max_iter):
        # Empty clusters; each cluster stores the indices of its points
        clusters = [[] for _ in range(k)]

        # Assign every point to its nearest centroid
        for idx, point in enumerate(X):
            distances = [manhattan_distance(point, centroid) for centroid in centroids]
            cluster_index = int(np.argmin(distances))
            clusters[cluster_index].append(idx)

        # Keep a copy of the old centroids
        old_centroids = centroids.copy()

        # Update each centroid as the mean of the points in its cluster
        for j in range(k):
            if len(clusters[j]) > 0:
                centroids[j] = np.mean(X[clusters[j]], axis=0)

        # Converged?
        if np.all(old_centroids == centroids):
            break

    # Total variance: sum of squared distances of each point to its centroid
    total_variance = 0.0
    for cluster_index, cluster_points in enumerate(clusters):
        cluster_center = centroids[cluster_index]
        for idx in cluster_points:
            total_variance += manhattan_distance(X[idx], cluster_center) ** 2

    # Compute the class distribution and the majority class of each cluster
    cluster_classifications = {}
    cluster_majority_class = {}
    for cluster_index, cluster_points in enumerate(clusters):
        cluster_labels = labels[cluster_points]
        cluster_counts = Counter(cluster_labels)
        cluster_classifications[cluster_index] = cluster_counts
        cluster_majority_class[cluster_index] = cluster_counts.most_common(1)[0][0]

    # Print the total variance
    print(f'Total variance: {total_variance}')

    # Print the class distribution and the majority class of each cluster
    for cluster_index in range(k):
        print(f'Cluster {cluster_index + 1} - Class distribution: {cluster_classifications[cluster_index]}')
        print(f'Cluster {cluster_index + 1} - Majority class: {cluster_majority_class[cluster_index]}')
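
    # Optional sketch, not part of the original commit (assumes the clusters and
    # counts computed above): the per-cluster classification rate, i.e. the share
    # of points in a cluster that carry its majority class.
    for cluster_index, cluster_points in enumerate(clusters):
        if not cluster_points:
            continue  # skip empty clusters
        majority_count = cluster_classifications[cluster_index].most_common(1)[0][1]
        rate = majority_count / len(cluster_points)
        print(f'Cluster {cluster_index + 1} - Classification rate: {rate:.2%}')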
\ No newline at end of file
File moved