Overview

The BaseEnvironment class is the core abstract interface for MAT-HPO optimization. Inherit from this class to define your optimization problem.

Class Definition

python
from MAT_HPO_LIB import BaseEnvironment

class MyEnvironment(BaseEnvironment):
    def __init__(self, name="MyEnvironment"):
        super().__init__(name)

Required Methods

load_data()

load_data() -> Any

Load and preprocess your dataset. Store data as instance variables.

python
def load_data(self):
    # Load your data
    X, y = load_my_dataset()

    # Store as instance variables
    self.X_train, self.X_val, self.y_train, self.y_val = train_test_split(X, y)

    return {"samples": len(X), "features": X.shape[1]}

create_model(hyperparams)

create_model(hyperparams: dict) -> Any

Create a model with given hyperparameters.

python
def create_model(self, hyperparams):
    model = MyModel(
        hidden_size=hyperparams['hidden_size'],
        learning_rate=hyperparams['learning_rate']
    )
    return model

train_evaluate(model, hyperparams)

train_evaluate(model: Any, hyperparams: dict) -> dict

Train the model and return evaluation metrics.

python
def train_evaluate(self, model, hyperparams):
    # Train your model
    model.fit(self.X_train, self.y_train)

    # Evaluate performance
    predictions = model.predict(self.X_val)
    f1 = f1_score(self.y_val, predictions)
    accuracy = accuracy_score(self.y_val, predictions)

    return {'f1': f1, 'accuracy': accuracy}

compute_reward(metrics)

compute_reward(metrics: dict) -> float

Compute reward from metrics (higher is better).

python
def compute_reward(self, metrics):
    # Simple reward: optimize F1-score
    return metrics['f1']

    # Alternatively, a multi-objective reward:
    # return 0.7 * metrics['f1'] + 0.3 * metrics['accuracy']

Complete Example

python
from MAT_HPO_LIB import BaseEnvironment
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score, accuracy_score

class RandomForestEnvironment(BaseEnvironment):
    def load_data(self):
        # Load your dataset
        X, y = self.load_my_data()
        self.X_train, self.X_val, self.y_train, self.y_val = \
            train_test_split(X, y, test_size=0.2, random_state=42)
        return {"samples": len(X), "features": X.shape[1]}

    def create_model(self, hyperparams):
        return RandomForestClassifier(
            n_estimators=int(hyperparams['n_estimators']),
            max_depth=int(hyperparams['max_depth']),
            random_state=42
        )

    def train_evaluate(self, model, hyperparams):
        model.fit(self.X_train, self.y_train)
        predictions = model.predict(self.X_val)

        return {
            'f1': f1_score(self.y_val, predictions, average='weighted'),
            'accuracy': accuracy_score(self.y_val, predictions)
        }

    def compute_reward(self, metrics):
        return metrics['f1']  # Optimize for F1-score

Next Steps

Complete Example

See a full working example with neural networks

Quick Start Guide

Step-by-step tutorial for your first optimization