refactor: reorganize project structure and enhance model verbosity
@@ -1,159 +1,14 @@
#!.venv/bin/python3
import argparse
import json
import logging
import sys
from pathlib import Path

import pandas as pd

from core.config import get_config, setup_logging
from research.experiment import ExperimentConfig
from research.experiment.experiment_tracker import ExperimentTracker
from research.experiment.feature_extractor import FeatureType
from research.experiment.experiment_builder import ExperimentBuilder
from research.experiment.experiment_runner import ExperimentRunner
from research.model_registry import list_available_models

def create_experiment_from_args(args) -> ExperimentConfig:
    """Create experiment configuration from command line arguments"""
    features = []
    if args.features:
        for feature_name in args.features:
            try:
                features.append(FeatureType(feature_name))
            except ValueError:
                logging.warning(f"Unknown feature type '{feature_name}', skipping")

    if not features:
        features = [FeatureType.FULL_NAME]  # Default

    # Parse model parameters
    model_params = {}
    if args.model_params:
        try:
            model_params = json.loads(args.model_params)
        except json.JSONDecodeError:
            logging.warning("Invalid JSON for model parameters, using defaults")

    # Parse feature parameters
    feature_params = {}
    if args.feature_params:
        try:
            feature_params = json.loads(args.feature_params)
        except json.JSONDecodeError:
            logging.warning("Invalid JSON for feature parameters, using defaults")

    # Parse data filters
    train_filter = None
    if args.train_filter:
        try:
            train_filter = json.loads(args.train_filter)
        except json.JSONDecodeError:
            logging.warning("Invalid JSON for train filter, ignoring")

    return ExperimentConfig(
        name=args.name,
        description=args.description or "",
        tags=args.tags or [],
        model_type=args.model_type,
        model_params=model_params,
        features=features,
        feature_params=feature_params,
        train_data_filter=train_filter,
        target_column=args.target,
        test_size=args.test_size,
        random_seed=args.seed,
        cross_validation_folds=args.cv_folds,
        metrics=args.metrics or ["accuracy", "precision", "recall", "f1"],
    )

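For reference, a minimal sketch of how the parsing above behaves. The namespace is invented for illustration, and "full_name" is assumed to be FeatureType.FULL_NAME.value (the enum's values are not shown in this commit):

import argparse
import json

# Illustrative only: mirror the CLI flags that main() defines below.
ns = argparse.Namespace(
    name="demo",
    description=None,
    tags=None,
    model_type="logistic_regression",
    features=["full_name", "no_such_feature"],  # unknown names are skipped with a warning
    model_params=json.dumps({"C": 0.5}),        # valid JSON -> parsed into a dict
    feature_params=None,                        # absent -> empty dict
    train_filter="{not json",                   # invalid JSON -> warning, filter stays None
    target="sex",
    test_size=0.2,
    seed=42,
    cv_folds=5,
    metrics=None,                               # falls back to accuracy/precision/recall/f1
)
config = create_experiment_from_args(ns)
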
def run_single_experiment(args):
    """Run a single experiment"""
    config = create_experiment_from_args(args)
    runner = ExperimentRunner()
    experiment_id = runner.run_experiment(config)

    logging.info(f"Experiment completed: {experiment_id}")

    # Show results
    experiment = runner.tracker.get_experiment(experiment_id)
    if experiment:
        logging.info("Results:")
        for metric, value in experiment.test_metrics.items():
            logging.info(f"  Test {metric}: {value:.4f}")

        if experiment.cv_metrics:
            logging.info("Cross-validation:")
            for metric, value in experiment.cv_metrics.items():
                if not metric.endswith("_std"):
                    std_key = f"{metric}_std"
                    std_val = experiment.cv_metrics.get(std_key, 0)
                    logging.info(f"  CV {metric}: {value:.4f} ± {std_val:.4f}")

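The cross-validation reporting above assumes cv_metrics stores paired keys, "<metric>" alongside "<metric>_std". A self-contained illustration of that convention (numbers invented):

cv_metrics = {"accuracy": 0.91, "accuracy_std": 0.02, "f1": 0.88, "f1_std": 0.03}
for metric, value in cv_metrics.items():
    if not metric.endswith("_std"):          # skip the std entries themselves
        std_val = cv_metrics.get(f"{metric}_std", 0)
        print(f"CV {metric}: {value:.4f} ± {std_val:.4f}")
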
def run_baseline_experiments(args):
    """Run baseline experiments"""
    builder = ExperimentBuilder()
    experiments = builder.create_baseline_experiments()

    runner = ExperimentRunner()
    experiment_ids = runner.run_experiment_batch(experiments)

    logging.info(f"Completed {len(experiment_ids)} baseline experiments")

    # Show comparison
    if experiment_ids:
        comparison = runner.compare_experiments(experiment_ids)
        logging.info("Baseline Results Comparison:")
        logging.info(
            comparison[["name", "model_type", "features", "test_accuracy"]].to_string(index=False)
        )

def run_ablation_study(args):
    """Run feature ablation study"""
    builder = ExperimentBuilder()
    experiments = builder.create_feature_ablation_study()

    runner = ExperimentRunner()
    experiment_ids = runner.run_experiment_batch(experiments)

    logging.info(f"Completed {len(experiment_ids)} ablation experiments")

    # Show results
    if experiment_ids:
        comparison = runner.compare_experiments(experiment_ids)
        logging.info("Ablation Study Results:")
        logging.info(comparison[["name", "test_accuracy", "test_f1"]].to_string(index=False))

def run_component_study(args):
    """Run name component study"""
    builder = ExperimentBuilder()
    experiments = builder.create_name_component_study()

    runner = ExperimentRunner()
    experiment_ids = runner.run_experiment_batch(experiments)

    logging.info(f"Completed {len(experiment_ids)} component study experiments")

    # Show results
    if experiment_ids:
        comparison = runner.compare_experiments(experiment_ids)
        logging.info("Name Component Study Results:")
        logging.info(
            comparison[["name", "test_accuracy", "test_precision", "test_recall"]].to_string(
                index=False
            )
        )

def list_experiments(args):
@@ -249,7 +104,7 @@ def show_experiment_details(args):
def compare_experiments_cmd(args):
    """Compare multiple experiments"""
-    runner = ExperimentRunner()
+    runner = ExperimentRunner(get_config())
    comparison = runner.compare_experiments(args.experiment_ids)

    if comparison.empty:
@@ -285,43 +140,9 @@ def main():
    parser.add_argument("--verbose", "-v", action="store_true", help="Verbose logging")
    subparsers = parser.add_subparsers(dest="command", help="Available commands")

    # Single experiment command
    exp_parser = subparsers.add_parser("run", help="Run a single experiment")
    exp_parser.add_argument("--name", required=True, help="Experiment name")
    exp_parser.add_argument("--description", help="Experiment description")
    exp_parser.add_argument(
        "--model-type",
        default="logistic_regression",
        choices=list_available_models(),
        help="Model type",
    )
    exp_parser.add_argument(
        "--features", nargs="+", choices=[f.value for f in FeatureType], help="Features to use"
    )
    exp_parser.add_argument("--model-params", help="Model parameters as JSON")
    exp_parser.add_argument("--feature-params", help="Feature parameters as JSON")
    exp_parser.add_argument("--train-filter", help="Training data filter as JSON")
    exp_parser.add_argument("--target", default="sex", help="Target column")
    exp_parser.add_argument("--test-size", type=float, default=0.2, help="Test set size")
    exp_parser.add_argument("--seed", type=int, default=42, help="Random seed")
    exp_parser.add_argument("--cv-folds", type=int, default=5, help="CV folds")
    exp_parser.add_argument(
        "--metrics",
        nargs="+",
        choices=["accuracy", "precision", "recall", "f1"],
        help="Metrics to calculate",
    )
    exp_parser.add_argument("--tags", nargs="+", help="Experiment tags")

    # Batch experiment commands
    subparsers.add_parser("baseline", help="Run baseline experiments")
    subparsers.add_parser("ablation", help="Run feature ablation study")
    subparsers.add_parser("components", help="Run name component study")

    # List experiments
    list_parser = subparsers.add_parser("list", help="List experiments")
    list_parser.add_argument("--status", choices=["pending", "running", "completed", "failed"])
    list_parser.add_argument("--model-type", choices=list_available_models())
    list_parser.add_argument("--tags", nargs="+", help="Filter by tags")

    # Show experiment details
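Putting the flags together, a typical invocation of the run subcommand might look like this (the script name is assumed; it is not visible in this diff):

# Hypothetical invocation:
#   ./experiments.py run --name lr-baseline \
#       --model-type logistic_regression \
#       --features full_name \
#       --model-params '{"C": 1.0}' \
#       --test-size 0.2 --seed 42 --cv-folds 5
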
@@ -350,22 +171,15 @@ def main():
    # Execute command
    try:
        if args.command == "run":
            run_single_experiment(args)
        elif args.command == "baseline":
            run_baseline_experiments(args)
        elif args.command == "ablation":
            run_ablation_study(args)
        elif args.command == "components":
            run_component_study(args)
-        elif args.command == "list":
-            list_experiments(args)
-        elif args.command == "show":
-            show_experiment_details(args)
-        elif args.command == "compare":
-            compare_experiments_cmd(args)
-        elif args.command == "export":
-            export_results(args)
+        command_map = {
+            "list": list_experiments,
+            "show": show_experiment_details,
+            "compare": compare_experiments_cmd,
+            "export": export_results,
+        }
+        handler = command_map.get(args.command)
+        if handler:
+            handler(args)

        return 0
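The command_map introduced above is a plain dispatch-table pattern: every handler shares the signature handler(args), and unknown commands simply come back as None from .get(). A self-contained sketch of the same idea, with invented names:

from types import SimpleNamespace
from typing import Callable, Dict

def show(args) -> None:
    print(f"showing {args.target}")

def compare(args) -> None:
    print(f"comparing {args.target}")

# Map command names to handlers instead of chaining elif branches.
dispatch: Dict[str, Callable] = {"show": show, "compare": compare}

args = SimpleNamespace(command="show", target="experiments")
handler = dispatch.get(args.command)
if handler:  # unknown commands fall through silently, as in main()
    handler(args)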