Reorganized flat 41-file directory into structured layout with: - scripts/ for Python analysis code with shared config.py - notebooks/ for Jupyter analysis notebooks - data/ split into raw/, metadata/, processed/ - docs/ with analysis summary, experimental design, and bimodal hypothesis tutorial - tasks/ with todo checklist and lessons learned - Comprehensive README, PLANNING.md, and .gitignore Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
97 lines · 3.6 KiB · Python
import pandas as pd
|
|
import numpy as np
|
|
from sklearn.model_selection import train_test_split, cross_val_score
|
|
from sklearn.ensemble import RandomForestClassifier
|
|
from sklearn.linear_model import LogisticRegression
|
|
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
|
|
from sklearn.preprocessing import StandardScaler
|
|
from sklearn.impute import SimpleImputer
|
|
import matplotlib.pyplot as plt
|
|
import seaborn as sns
|
|
|
|
from config import DATA_PROCESSED, FIGURES
|
|
|
|
# Load the per-group distance tables produced by the preprocessing step.
trained_df = pd.read_csv(DATA_PROCESSED / 'trained_distances.csv')
untrained_df = pd.read_csv(DATA_PROCESSED / 'untrained_distances.csv')

# Tag each table with its experimental group before merging.
trained_df['group'] = 'trained'
untrained_df['group'] = 'untrained'

# Stack both groups into one frame; discard any rows missing a group label.
combined_data = pd.concat([trained_df, untrained_df], ignore_index=True)
combined_data = combined_data.dropna(subset=['group'])
|
# Predictor columns and the classification target (experimental group).
features = ['distance', 'n_flies', 'area_fly1', 'area_fly2']
X = combined_data[features]
y = combined_data['group']

# Fill missing feature values with each column's mean.
imputer = SimpleImputer(strategy='mean')
X_imputed = pd.DataFrame(imputer.fit_transform(X), columns=features)

# Hold out 20% of the rows for evaluation (fixed seed for reproducibility).
X_train, X_test, y_train, y_test = train_test_split(
    X_imputed, y, test_size=0.2, random_state=42)

# Fit the scaler on the training split only, then apply it to the test
# split, so no test-set statistics leak into training.
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
|
print("=== MACHINE LEARNING CLASSIFICATION ===")
print(f"Training set size: {len(X_train)}")
print(f"Testing set size: {len(X_test)}")

# 1. Logistic regression, fitted on the standardized features (linear
# models are sensitive to feature scale).
print("\n1. Logistic Regression:")
lr_model = LogisticRegression(random_state=42).fit(X_train_scaled, y_train)
lr_predictions = lr_model.predict(X_test_scaled)
lr_accuracy = accuracy_score(y_test, lr_predictions)
print(f"Accuracy: {lr_accuracy:.4f}")
print("\nClassification Report:")
print(classification_report(y_test, lr_predictions))
|
# 2. Random forest, fitted on the raw (unscaled) features — tree splits
# are invariant to monotone feature scaling, so no standardization needed.
print("\n2. Random Forest:")
rf_model = RandomForestClassifier(n_estimators=100, random_state=42).fit(
    X_train, y_train)
rf_predictions = rf_model.predict(X_test)
rf_accuracy = accuracy_score(y_test, rf_predictions)
print(f"Accuracy: {rf_accuracy:.4f}")
print("\nClassification Report:")
print(classification_report(y_test, rf_predictions))
|
# Rank predictors by the forest's impurity-based importances, highest first.
print("\nFeature Importance (Random Forest):")
importance_table = pd.DataFrame(
    {'feature': features, 'importance': rf_model.feature_importances_})
feature_importance = importance_table.sort_values('importance',
                                                  ascending=False)
print(feature_importance)
|
# Confusion matrix for whichever model scored higher on the held-out set.
# (Compute the comparison once instead of repeating the ternary.)
if rf_accuracy > lr_accuracy:
    best_model_name, best_predictions = "Random Forest", rf_predictions
else:
    best_model_name, best_predictions = "Logistic Regression", lr_predictions

plt.figure(figsize=(8, 6))
# Pass labels= explicitly so the matrix row/column order is guaranteed to
# match the hardcoded heatmap tick labels below. Without it,
# confusion_matrix orders classes by sorted label values observed in the
# data, which only coincidentally matches 'Trained'/'Untrained' today.
cm = confusion_matrix(y_test, best_predictions,
                      labels=['trained', 'untrained'])
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
            xticklabels=['Trained', 'Untrained'],
            yticklabels=['Trained', 'Untrained'])
plt.title(f'Confusion Matrix - {best_model_name}')
plt.xlabel('Predicted')
plt.ylabel('Actual')
plt.tight_layout()
plt.savefig(FIGURES / 'confusion_matrix.png', dpi=300, bbox_inches='tight')
plt.show()
|
# 5-fold cross-validation on the training split, using freshly constructed
# estimators so the fitted models above are left untouched.
print("\n=== CROSS-VALIDATION SCORES ===")
lr_cv_scores = cross_val_score(LogisticRegression(random_state=42),
                               X_train_scaled, y_train, cv=5)
rf_cv_scores = cross_val_score(
    RandomForestClassifier(n_estimators=100, random_state=42),
    X_train, y_train, cv=5)

# Report mean ± 2*std (≈95% interval) for each model.
for label, scores in (("Logistic Regression", lr_cv_scores),
                      ("Random Forest", rf_cv_scores)):
    print(f"{label} CV Score: {scores.mean():.4f} (+/- {scores.std() * 2:.4f})")