protocol-bicorder/analysis/scripts/lda_visualization.py
Nathan Schneider 897c30406b Reorganize directory, add manual dataset and sync tooling
- Move all scripts to scripts/, web assets to web/, analysis results
  into self-contained data/readings/<type>_<YYYYMMDD>/ directories
- Add data/readings/manual_20260320/ with 32 JSON readings from
  git.medlab.host/ntnsndr/protocol-bicorder-data
- Add scripts/json_to_csv.py to convert bicorder JSON files to CSV (see
  the sketch after this list)
- Add scripts/sync_readings.sh for one-command sync + re-analysis of
  any dataset backed by a .sync_source config file
- Add scripts/classify_readings.py to apply the LDA classifier to all
  readings and save per-reading cluster assignments
- Add --min-coverage flag to multivariate_analysis.py for sparse/shortform
  datasets; also applies in lda_visualization.py
- Fix lda_visualization.py NaN handling and 0-d array annotation bug
- Update README.md and WORKFLOW.md to document datasets, sync workflow,
  shortform handling, and new scripts
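
As a rough illustration of the JSON-to-CSV step, here is a minimal sketch assuming one reading per JSON file; the helper name, the json/ subdirectory, and the reliance on pandas.json_normalize are assumptions for illustration, not the actual scripts/json_to_csv.py implementation:

  # Hypothetical sketch only; paths and field layout are assumptions.
  import json
  from pathlib import Path
  import pandas as pd

  def readings_to_csv(json_dir, out_csv):
      """Flatten one JSON reading per file into a single CSV table."""
      rows = []
      for path in sorted(Path(json_dir).glob('*.json')):
          with open(path) as f:
              # json_normalize turns nested keys into flat column names
              rows.append(pd.json_normalize(json.load(f)))
      pd.concat(rows, ignore_index=True).to_csv(out_csv, index=False)

  # e.g. readings_to_csv('data/readings/manual_20260320/json',
  #                      'data/readings/manual_20260320/readings.csv')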

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-20 17:35:13 -06:00


#!/usr/bin/env python3
"""
Create LDA visualization to maximize cluster separation.
Usage:
    python3 scripts/lda_visualization.py data/readings/synthetic_20251116/readings.csv
    python3 scripts/lda_visualization.py data/readings/manual_20260101/readings.csv --analysis-dir data/readings/manual_20260101/analysis
"""
import argparse
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.preprocessing import StandardScaler
from pathlib import Path
def main():
    parser = argparse.ArgumentParser(
        description='Create LDA visualization of cluster separation',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Example usage:
python3 scripts/lda_visualization.py data/readings/synthetic_20251116/readings.csv
python3 scripts/lda_visualization.py data/readings/manual_20260101/readings.csv --analysis-dir data/readings/manual_20260101/analysis
"""
    )
    parser.add_argument('input_csv', help='Diagnostic readings CSV (e.g. data/readings/synthetic_20251116/readings.csv)')
    parser.add_argument('--analysis-dir', default=None,
                        help='Analysis directory (default: <dataset_dir>/analysis)')
    args = parser.parse_args()
    dataset_dir = Path(args.input_csv).parent
    results_dir = Path(args.analysis_dir) if args.analysis_dir else dataset_dir / 'analysis'
    plots_dir = results_dir / 'plots'
    data_dir = results_dir / 'data'
    # Make sure the output directories exist before anything is written to them
    plots_dir.mkdir(parents=True, exist_ok=True)
    data_dir.mkdir(parents=True, exist_ok=True)
    # Load the original data
    df = pd.read_csv(args.input_csv)

    # Identify dimension columns
    all_cols = df.columns.tolist()
    design_cols = [c for c in all_cols if c.startswith('Design_')]
    entanglement_cols = [c for c in all_cols if c.startswith('Entanglement_')]
    experience_cols = [c for c in all_cols if c.startswith('Experience_')]
    dimension_cols = design_cols + entanglement_cols + experience_cols

    # Load cluster assignments
    clusters = pd.read_csv(data_dir / 'kmeans_clusters.csv')
    df_with_clusters = df.merge(clusters, on='Descriptor')

    # Drop dimension columns with low coverage (< 80%) to handle shortform datasets
    n = len(df_with_clusters)
    coverage = df_with_clusters[dimension_cols].notna().sum() / n
    dimension_cols = [c for c in dimension_cols if coverage[c] >= 0.8]

    # Prepare data — impute any remaining NaNs with column median
    X_df = df_with_clusters[dimension_cols].copy()
    X_df = X_df.fillna(X_df.median())
    X = X_df.values
    y = df_with_clusters['cluster'].values

    # Standardize
    scaler = StandardScaler()
    X_scaled = scaler.fit_transform(X)

    # Fit LDA (with 1 component for 2 classes)
    lda = LinearDiscriminantAnalysis(n_components=1)
    X_lda = lda.fit_transform(X_scaled, y).ravel()
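    # With two clusters, LDA can produce at most n_classes - 1 = 1 discriminant
    # axis, so LD1 is the single projection that maximizes between-cluster
    # variance relative to within-cluster variance; asking scikit-learn for
    # more components than min(n_features, n_classes - 1) would raise an error.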
    # Create histogram showing separation
    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(14, 10))

    # Histogram
    colors = {1: '#2E86AB', 2: '#A23B72'}
    for cluster_id in [1, 2]:
        cluster_data = X_lda[y == cluster_id]
        ax1.hist(cluster_data, bins=30, alpha=0.6,
                 color=colors[cluster_id],
                 label=f'Cluster {cluster_id}',
                 edgecolor='white', linewidth=0.5)
    ax1.set_xlabel('Linear Discriminant (LD1)', fontsize=12)
    ax1.set_ylabel('Frequency', fontsize=12)
    ax1.set_title('Linear Discriminant Analysis: Cluster Separation\n(Maximum separation projection)',
                  fontsize=14, fontweight='bold')
    ax1.legend(fontsize=11)
    ax1.grid(True, alpha=0.3, axis='y')

    # Strip plot - shows individual protocols
    for cluster_id in [1, 2]:
        cluster_data = X_lda[y == cluster_id]
        cluster_protocols = df_with_clusters[df_with_clusters['cluster'] == cluster_id]['Descriptor'].values
        # Add jitter for visibility
        y_jitter = np.random.normal(cluster_id, 0.1, size=len(cluster_data))
        ax2.scatter(cluster_data, y_jitter,
                    c=colors[cluster_id], alpha=0.5, s=40,
                    edgecolors='white', linewidth=0.3)
        # Label a few representative protocols
        for i in range(0, len(cluster_data), 25):
            ax2.annotate(cluster_protocols[i],
                         (cluster_data[i], y_jitter[i]),
                         fontsize=7, alpha=0.7,
                         xytext=(0, 5), textcoords='offset points',
                         rotation=45, ha='left')
    ax2.set_xlabel('Linear Discriminant (LD1)', fontsize=12)
    ax2.set_ylabel('Cluster', fontsize=12)
    ax2.set_yticks([1, 2])
    ax2.set_yticklabels(['Cluster 1:\nRelational/Cultural', 'Cluster 2:\nInstitutional/Bureaucratic'])
    ax2.set_title('Individual Protocols Projected onto Discriminant Axis', fontsize=12)
    ax2.grid(True, alpha=0.3, axis='x')

    plt.tight_layout()
    plt.savefig(plots_dir / 'lda_cluster_separation.png', dpi=300, bbox_inches='tight')
    print(f"Saved: {plots_dir / 'lda_cluster_separation.png'}")
    # Calculate separation metrics
    mean_1 = X_lda[y == 1].mean()
    mean_2 = X_lda[y == 2].mean()
    std_1 = X_lda[y == 1].std()
    std_2 = X_lda[y == 2].std()

    # Cohen's d (effect size)
    pooled_std = np.sqrt((std_1**2 + std_2**2) / 2)
    cohens_d = abs(mean_1 - mean_2) / pooled_std
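    # Effect size uses the "average variance" form of Cohen's d:
    #   d = |mean_1 - mean_2| / sqrt((std_1**2 + std_2**2) / 2)
    # With unequal cluster sizes this differs slightly from the sample-size-
    # weighted pooled standard deviation, but the thresholds printed below
    # (0.2 small, 0.5 medium, 0.8 large) are interpreted the same way.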
print(f"\n=== Cluster Separation Statistics ===")
mean_1_val = mean_1[0] if isinstance(mean_1, np.ndarray) else mean_1
mean_2_val = mean_2[0] if isinstance(mean_2, np.ndarray) else mean_2
cohens_d_val = cohens_d[0] if isinstance(cohens_d, np.ndarray) else cohens_d
print(f"Cluster 1 mean: {mean_1_val:.3f} (std: {std_1:.3f})")
print(f"Cluster 2 mean: {mean_2_val:.3f} (std: {std_2:.3f})")
print(f"Distance between means: {abs(mean_1_val - mean_2_val):.3f}")
print(f"Cohen's d (effect size): {cohens_d_val:.3f}")
print(f" (0.2=small, 0.5=medium, 0.8=large effect)")
# Overlap percentage (rough estimate)
overlap_start = max(X_lda[y == 1].min(), X_lda[y == 2].min())
overlap_end = min(X_lda[y == 1].max(), X_lda[y == 2].max())
overlap_range = overlap_end - overlap_start if overlap_end > overlap_start else 0
total_range = X_lda.max() - X_lda.min()
overlap_pct = (overlap_range / total_range) * 100 if overlap_range > 0 else 0
print(f"Approximate overlap: {overlap_pct:.1f}% of total range")
    # Save LDA projection data
    lda_df = pd.DataFrame({
        'Descriptor': df_with_clusters['Descriptor'],
        'LD1': X_lda.flatten(),
        'Cluster': y
    })
    lda_df.to_csv(data_dir / 'lda_projection.csv', index=False)
    print(f"Saved: {data_dir / 'lda_projection.csv'}")

    print("\n=== Most discriminating dimensions ===")
    loadings = pd.DataFrame({
        'Dimension': dimension_cols,
        'LDA_Coefficient': lda.coef_[0]
    })
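    # Because the dimensions were standardized before fitting, the LDA
    # coefficients are on a comparable scale, which is what makes ranking
    # them by absolute value a reasonable measure of influence.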
    loadings['Abs_Coefficient'] = loadings['LDA_Coefficient'].abs()
    loadings = loadings.sort_values('Abs_Coefficient', ascending=False)
    print("\nTop 10 dimensions that separate the clusters:")
    for _, row in loadings.head(10).iterrows():
        print(f" {row['Dimension']}: {row['LDA_Coefficient']:.3f}")
    loadings.to_csv(data_dir / 'lda_coefficients.csv', index=False)
    print(f"\nSaved: {data_dir / 'lda_coefficients.csv'}")


if __name__ == '__main__':
    main()