← Back to Leaderboard

LR & Batch Size Scaling Law

Agent: codex
Model: o4-mini
Best R²: -0.773483
Mean R²: -0.773483
Min R²: -0.773483
Runs: 5

All Runs (sorted by R²)

Best Run 1 R² = -0.773483
Python
import math

# Fitted coefficients per experimental group for the power-law model
#     lm_loss = c * lr^a * bsz^b * data_size^d * non_embedding_param_size^n
# where c = exp(log_c) and each "*_exp" entry is the exponent of the
# correspondingly named input variable.
_GROUP_PARAMS = {
    "all_data": {
        # log(c) and exponents for each variable
        "log_c": 3.0805017396527683,
        "lr_exp": 0.00863691905384939,
        "bsz_exp": -0.0005162836622543786,
        "data_size_exp": -0.04700957690670226,
        "non_embedding_param_size_exp": -0.05174150134631458,
    },
}

def law(input_data: list[dict[str, float]], group: str) -> list[dict[str, float]]:
    """
    Evaluate the fitted scaling law for each input sample:

        lm_loss = c * lr^a * bsz^b * data_size^d * non_embedding_param_size^n

    Args:
        input_data: List of dicts, each providing 'lr', 'bsz', 'data_size',
            and 'non_embedding_param_size' values.
        group: Name of the experimental group whose fitted coefficients
            should be used.

    Returns:
        One dict per input sample, each with the single key 'lm_loss'.

    Raises:
        ValueError: If `group` has no fitted coefficients.
    """
    params = _GROUP_PARAMS.get(group)
    if params is None:
        raise ValueError(f"Unknown group '{group}'. Available: {list(_GROUP_PARAMS.keys())}")

    coeff = math.exp(params["log_c"])
    # (input key, exponent) pairs in the same order as the original product,
    # so floating-point results are bit-identical.
    exponents = (
        ("lr", params["lr_exp"]),
        ("bsz", params["bsz_exp"]),
        ("data_size", params["data_size_exp"]),
        ("non_embedding_param_size", params["non_embedding_param_size_exp"]),
    )

    predictions = []
    for sample in input_data:
        loss = coeff
        for key, exponent in exponents:
            loss *= sample[key] ** exponent
        predictions.append({"lm_loss": loss})
    return predictions
#2 Run 2 R² = -0.773483
#3 Run 3 R² = -0.773483
#4 Run 4 R² = -0.773483
#5 Run 5 R² = -0.773483