🚀 Quick Start

Step 1: Installation

```bash
# Clone the repository
git clone https://github.com/VM230705/MAT-HPO-Library.git
cd MAT-HPO-Library

# Install dependencies
pip install torch numpy scikit-learn

# ✅ Test installation
python test_working_examples.py
```
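
If you want an extra sanity check, a minimal import test works as well. This is a sketch that assumes you run it from the repository root so `MAT_HPO_LIB` resolves; the three classes imported here are the ones used throughout this guide:

```python
# Quick import check, run from the repository root so MAT_HPO_LIB is on the path
from MAT_HPO_LIB import BaseEnvironment, HyperparameterSpace, MAT_HPO_Optimizer

print("MAT_HPO_LIB imported successfully")
```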

Step 2: Define Environment

Create a class that inherits from BaseEnvironment:

```python
from MAT_HPO_LIB import BaseEnvironment

class MyEnvironment(BaseEnvironment):
    def __init__(self, name="MyEnvironment"):
        super().__init__(name)
        # Initialize your data, model, etc.

    def step(self, hyperparams):
        """
        Train and evaluate the model with the given hyperparameters.

        Args:
            hyperparams: dict of hyperparameter values

        Returns:
            f1_score, auc_score, gmean_score, done
        """
        # 1. Train your model with hyperparams
        # (create_model / train_model are placeholders for your own training code)
        model = create_model(hyperparams)
        trained_model = train_model(model, hyperparams)

        # 2. Evaluate the model
        # (evaluate_* are placeholders for your own metric functions)
        f1 = evaluate_f1(trained_model)
        auc = evaluate_auc(trained_model)
        gmean = evaluate_gmean(trained_model)

        # ✅ 3. Check if optimization should stop
        done = check_stopping_condition()

        return f1, auc, gmean, done
```
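
For a concrete feel, here is a minimal, self-contained sketch of an environment built with scikit-learn (installed above). The toy dataset, the RandomForest model, the hyperparameter names (`n_estimators`, `max_depth`, `class_weight_0/1`), and the stopping rule are illustrative assumptions, not part of the MAT_HPO_LIB API:

```python
import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import f1_score, recall_score, roc_auc_score
from sklearn.model_selection import train_test_split

from MAT_HPO_LIB import BaseEnvironment

class SklearnEnvironment(BaseEnvironment):
    """Illustrative environment: tune a RandomForest on an imbalanced toy dataset."""

    def __init__(self, name="SklearnEnvironment"):
        super().__init__(name)
        X, y = make_classification(n_samples=1000, weights=[0.9, 0.1], random_state=0)
        self.X_train, self.X_val, self.y_train, self.y_val = train_test_split(
            X, y, test_size=0.3, stratify=y, random_state=0)
        self.steps_taken = 0

    def step(self, hyperparams):
        # int() is a defensive cast; the library's automatic int handling may already do this
        model = RandomForestClassifier(
            n_estimators=int(hyperparams['n_estimators']),
            max_depth=int(hyperparams['max_depth']),
            class_weight={0: hyperparams['class_weight_0'],
                          1: hyperparams['class_weight_1']},
            random_state=0)
        model.fit(self.X_train, self.y_train)

        y_pred = model.predict(self.X_val)
        y_prob = model.predict_proba(self.X_val)[:, 1]

        f1 = f1_score(self.y_val, y_pred)
        auc = roc_auc_score(self.y_val, y_prob)
        # G-mean: geometric mean of the per-class recalls
        recalls = recall_score(self.y_val, y_pred, average=None)
        gmean = float(np.sqrt(recalls[0] * recalls[1]))

        # Illustrative stopping rule: signal "done" after a fixed evaluation budget
        self.steps_taken += 1
        done = self.steps_taken >= 50

        return f1, auc, gmean, done
```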

Step 3: Define Hyperparameter Space

Specify hyperparameter ranges for optimization. Use continuous bounds (min, max) - the system will automatically handle int/float casting based on your model's needs:

```python
from MAT_HPO_LIB import HyperparameterSpace

# Method 1: Using add_continuous with agent assignment
space = HyperparameterSpace()

# Agent 0: Problem-specific parameters (float ranges)
space.add_continuous('class_weight_0', 0.1, 5.0, agent=0)
space.add_continuous('class_weight_1', 0.1, 5.0, agent=0)

# Agent 1: Architecture parameters (int ranges)
space.add_continuous('hidden_size', 64, 512, agent=1)    # Will be cast to int
space.add_continuous('num_layers', 2, 8, agent=1)        # Will be cast to int

# Agent 2: Training parameters
space.add_continuous('learning_rate', 1e-5, 1e-2, agent=2)  # float
space.add_discrete('batch_size', [16, 32, 64, 128], agent=2)  # Discrete choices

# Method 2: Mixing continuous, discrete, and boolean parameters
space = HyperparameterSpace()
space.add_continuous('class_weight_0', 0.5, 3.0, agent=0)  # Problem-specific
space.add_discrete('optimizer', ['adam', 'sgd', 'rmsprop'], agent=2)  # Training
space.add_boolean('use_dropout', agent=1)  # Architecture
```
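
The space you define determines the keys of the dict passed to your environment's `step()`. As a rough illustration for Method 1 above (the values are made-up draws, not actual library output), a proposal could look like this:

```python
# Hypothetical sample from the Method 1 space; shown only to illustrate
# the shape of the `hyperparams` argument received by step()
hyperparams = {
    'class_weight_0': 1.7,
    'class_weight_1': 0.9,
    'hidden_size': 256,      # integer-valued architecture parameter
    'num_layers': 4,
    'learning_rate': 3e-4,
    'batch_size': 64,        # one of the discrete choices
}
```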

Step 4: Run Optimization

```python
from MAT_HPO_LIB import MAT_HPO_Optimizer, HyperparameterSpace
from MAT_HPO_LIB.utils import DefaultConfigs

# Create the environment and hyperparameter space (from the previous steps)
environment = MyEnvironment()
space = HyperparameterSpace()  # configure it with the parameters from Step 3

# ⚙️ Choose a configuration
config = DefaultConfigs.standard()  # 100 optimization steps

# Create the optimizer
optimizer = MAT_HPO_Optimizer(
    environment=environment,
    hyperparameter_space=space,
    config=config
)

# Run optimization
print("Starting optimization...")
results = optimizer.optimize()

# Get results
best_params = results['best_hyperparameters']
best_performance = results['best_performance']

print(f"Best hyperparameters: {best_params}")
print(f"Best performance: {best_performance}")
```
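
Putting the pieces together: a hedged end-to-end run that combines the illustrative SklearnEnvironment sketched in Step 2 with a small space and the quick-test configuration. The hyperparameter names here match that sketch and are assumptions, not library defaults:

```python
from MAT_HPO_LIB import MAT_HPO_Optimizer, HyperparameterSpace
from MAT_HPO_LIB.utils import DefaultConfigs

space = HyperparameterSpace()
space.add_continuous('class_weight_0', 0.1, 5.0, agent=0)
space.add_continuous('class_weight_1', 0.1, 5.0, agent=0)
space.add_continuous('n_estimators', 50, 500, agent=1)  # cast to int inside step()
space.add_continuous('max_depth', 2, 20, agent=1)       # cast to int inside step()

optimizer = MAT_HPO_Optimizer(
    environment=SklearnEnvironment(),    # from the Step 2 sketch
    hyperparameter_space=space,
    config=DefaultConfigs.quick_test(),  # 10 steps, good for a first run
)
results = optimizer.optimize()
print(results['best_hyperparameters'])
```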

Step 5: View Results

Results are automatically saved to files:

- best_hyperparams.json - Best hyperparameters found
- optimization_results.json - Complete optimization results
- step_log.jsonl - Step-by-step log

```python
# Load results from file
import json

with open('best_hyperparams.json', 'r') as f:
    best_params = json.load(f)

print("Optimized hyperparameters:")
for param, value in best_params.items():
    print(f"   {param}: {value}")
```
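
The step log is a JSON Lines file with one record per optimization step. A minimal sketch for skimming it, assuming only the file name listed above (the exact fields in each record depend on the library version):

```python
import json

# Print every step record in the log; each line is a standalone JSON object
with open('step_log.jsonl', 'r') as f:
    for line in f:
        record = json.loads(line)
        print(record)
```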

⚙️ Configuration Options

```python
from MAT_HPO_LIB.utils import DefaultConfigs, OptimizationConfig

# Quick test (10 steps) - perfect for debugging
config = DefaultConfigs.quick_test()

# Standard optimization (100 steps) - recommended for most use cases
config = DefaultConfigs.standard()

# CPU-only mode - for environments without a GPU
config = DefaultConfigs.cpu_only()

# Custom configuration - full control over the parameters
config = OptimizationConfig(
    max_steps=50,          # Number of optimization steps
    batch_size=32,         # Training batch size
    use_cuda=True,         # Enable GPU acceleration
    gpu_device=0           # GPU device ID
)
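```

As a quick illustration of a custom configuration for debugging on a machine without a GPU, the fields shown above can be combined like this (a sketch; defaults for any fields not listed here are not documented in this guide):

```python
# Hedged example: a small CPU-only run for debugging
debug_config = OptimizationConfig(
    max_steps=10,      # keep the run short
    batch_size=16,     # small batches to stay light on memory
    use_cuda=False,    # force CPU execution
    gpu_device=0       # set but unused when CUDA is disabled (assumption)
)
```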

Next Steps

- Working Examples - complete code examples and use cases
- API Reference - detailed documentation of all classes and methods