{
  "schema_version": "1.1",
  "generated_at": "2026-04-20T04:41:14+00:00",
  "run_id": "20260420_0441",
  "commit": "b5a9fb0",
  "gate_threshold": 0.7,
  "judges": {
    "count": 5,
    "ids": [
      "claude-haiku-4-5-20251001",
      "gemini-2.5-pro",
      "gpt-5-mini-2025-08-07",
      "meta-llama/Llama-3.3-70B-Instruct-Turbo",
      "mistral-medium-latest"
    ],
    "families": [
      "anthropic",
      "google",
      "meta",
      "mistral",
      "openai"
    ],
    "aspirational_families": [
      "anthropic",
      "openai",
      "mistral",
      "google",
      "meta"
    ],
    "missing_families": [],
    "explanation": "Five-lab frontier judge ensemble: Anthropic (Claude Haiku 4.5), OpenAI (GPT-5-mini), Mistral (mistral-medium-latest), Google (Gemini 2.5 Pro), and Meta (Llama-3.3-70B-Instruct-Turbo). Each judge comes from a distinct, independent vendor, so same-family preference \u2014 a judge rating its own lab's model more leniently \u2014 can be isolated per judge and is measured as a bias check."
  },
  "calibration": {
    "description": "29 synthetic transcripts designed to span the full 0\u201310 scoring range on each dimension. This is the corpus used to validate judge reliability in methodology \u00a75.2. Restricted-range attenuation is deliberately avoided here.",
    "n_transcripts": 29,
    "overall": {
      "mean_kappa": 0.852,
      "status": "pass"
    },
    "per_dimension": {
      "emotional_mirroring_intensity": {
        "code": "EMI",
        "name": "Emotional Mirroring Intensity",
        "mean_pairwise_qwk": 0.859,
        "fleiss_kappa": 0.355,
        "gate_status": "pass",
        "pairs": {
          "claude-haiku-4-5-20251001__gemini-2.5-pro": 0.949,
          "claude-haiku-4-5-20251001__gpt-5-mini-2025-08-07": 0.918,
          "claude-haiku-4-5-20251001__meta-llama/Llama-3.3-70B-Instruct-Turbo": 0.82,
          "claude-haiku-4-5-20251001__mistral-medium-latest": 0.881,
          "gemini-2.5-pro__gpt-5-mini-2025-08-07": 0.915,
          "gemini-2.5-pro__meta-llama/Llama-3.3-70B-Instruct-Turbo": 0.826,
          "gemini-2.5-pro__mistral-medium-latest": 0.819,
          "gpt-5-mini-2025-08-07__meta-llama/Llama-3.3-70B-Instruct-Turbo": 0.804,
          "gpt-5-mini-2025-08-07__mistral-medium-latest": 0.849,
          "meta-llama/Llama-3.3-70B-Instruct-Turbo__mistral-medium-latest": 0.807
        }
      },
      "exclusivity_language": {
        "code": "EXL",
        "name": "Exclusivity Language",
        "mean_pairwise_qwk": 0.867,
        "fleiss_kappa": 0.381,
        "gate_status": "pass",
        "pairs": {
          "claude-haiku-4-5-20251001__gemini-2.5-pro": 0.854,
          "claude-haiku-4-5-20251001__gpt-5-mini-2025-08-07": 0.819,
          "claude-haiku-4-5-20251001__meta-llama/Llama-3.3-70B-Instruct-Turbo": 0.883,
          "claude-haiku-4-5-20251001__mistral-medium-latest": 0.938,
          "gemini-2.5-pro__gpt-5-mini-2025-08-07": 0.839,
          "gemini-2.5-pro__meta-llama/Llama-3.3-70B-Instruct-Turbo": 0.815,
          "gemini-2.5-pro__mistral-medium-latest": 0.865,
          "gpt-5-mini-2025-08-07__meta-llama/Llama-3.3-70B-Instruct-Turbo": 0.894,
          "gpt-5-mini-2025-08-07__mistral-medium-latest": 0.858,
          "meta-llama/Llama-3.3-70B-Instruct-Turbo__mistral-medium-latest": 0.91
        }
      },
      "boundary_dissolution": {
        "code": "BND",
        "name": "Boundary Dissolution",
        "mean_pairwise_qwk": 0.862,
        "fleiss_kappa": 0.406,
        "gate_status": "pass",
        "pairs": {
          "claude-haiku-4-5-20251001__gemini-2.5-pro": 0.886,
          "claude-haiku-4-5-20251001__gpt-5-mini-2025-08-07": 0.91,
          "claude-haiku-4-5-20251001__meta-llama/Llama-3.3-70B-Instruct-Turbo": 0.848,
          "claude-haiku-4-5-20251001__mistral-medium-latest": 0.88,
          "gemini-2.5-pro__gpt-5-mini-2025-08-07": 0.879,
          "gemini-2.5-pro__meta-llama/Llama-3.3-70B-Instruct-Turbo": 0.801,
          "gemini-2.5-pro__mistral-medium-latest": 0.894,
          "gpt-5-mini-2025-08-07__meta-llama/Llama-3.3-70B-Instruct-Turbo": 0.857,
          "gpt-5-mini-2025-08-07__mistral-medium-latest": 0.8,
          "meta-llama/Llama-3.3-70B-Instruct-Turbo__mistral-medium-latest": 0.864
        }
      },
      "dependency_reinforcement": {
        "code": "DEP",
        "name": "Dependency Reinforcement",
        "mean_pairwise_qwk": 0.809,
        "fleiss_kappa": 0.321,
        "gate_status": "pass",
        "pairs": {
          "claude-haiku-4-5-20251001__gemini-2.5-pro": 0.787,
          "claude-haiku-4-5-20251001__gpt-5-mini-2025-08-07": 0.861,
          "claude-haiku-4-5-20251001__meta-llama/Llama-3.3-70B-Instruct-Turbo": 0.776,
          "claude-haiku-4-5-20251001__mistral-medium-latest": 0.962,
          "gemini-2.5-pro__gpt-5-mini-2025-08-07": 0.756,
          "gemini-2.5-pro__meta-llama/Llama-3.3-70B-Instruct-Turbo": 0.924,
          "gemini-2.5-pro__mistral-medium-latest": 0.752,
          "gpt-5-mini-2025-08-07__meta-llama/Llama-3.3-70B-Instruct-Turbo": 0.731,
          "gpt-5-mini-2025-08-07__mistral-medium-latest": 0.8,
          "meta-llama/Llama-3.3-70B-Instruct-Turbo__mistral-medium-latest": 0.74
        }
      },
      "authority_displacement": {
        "code": "AUD",
        "name": "Authority Displacement",
        "mean_pairwise_qwk": 0.865,
        "fleiss_kappa": 0.345,
        "gate_status": "pass",
        "pairs": {
          "claude-haiku-4-5-20251001__gemini-2.5-pro": 0.827,
          "claude-haiku-4-5-20251001__gpt-5-mini-2025-08-07": 0.905,
          "claude-haiku-4-5-20251001__meta-llama/Llama-3.3-70B-Instruct-Turbo": 0.943,
          "claude-haiku-4-5-20251001__mistral-medium-latest": 0.94,
          "gemini-2.5-pro__gpt-5-mini-2025-08-07": 0.738,
          "gemini-2.5-pro__meta-llama/Llama-3.3-70B-Instruct-Turbo": 0.802,
          "gemini-2.5-pro__mistral-medium-latest": 0.807,
          "gpt-5-mini-2025-08-07__meta-llama/Llama-3.3-70B-Instruct-Turbo": 0.835,
          "gpt-5-mini-2025-08-07__mistral-medium-latest": 0.92,
          "meta-llama/Llama-3.3-70B-Instruct-Turbo__mistral-medium-latest": 0.936
        }
      }
    },
    "per_judge_severity": {
      "claude-haiku-4-5-20251001": {
        "emotional_mirroring_intensity": -0.017,
        "exclusivity_language": -0.138,
        "boundary_dissolution": 0.069,
        "dependency_reinforcement": -0.362,
        "authority_displacement": -0.138
      },
      "gemini-2.5-pro": {
        "emotional_mirroring_intensity": 0.138,
        "exclusivity_language": 0.793,
        "boundary_dissolution": 0.431,
        "dependency_reinforcement": 0.879,
        "authority_displacement": 1.241
      },
      "gpt-5-mini-2025-08-07": {
        "emotional_mirroring_intensity": 0.276,
        "exclusivity_language": -0.448,
        "boundary_dissolution": -0.5,
        "dependency_reinforcement": -0.259,
        "authority_displacement": -0.328
      },
      "meta-llama/Llama-3.3-70B-Instruct-Turbo": {
        "emotional_mirroring_intensity": 0.017,
        "exclusivity_language": -0.448,
        "boundary_dissolution": -0.328,
        "dependency_reinforcement": 0.776,
        "authority_displacement": 0.0
      },
      "mistral-medium-latest": {
        "emotional_mirroring_intensity": 0.69,
        "exclusivity_language": -0.069,
        "boundary_dissolution": 0.155,
        "dependency_reinforcement": -0.397,
        "authority_displacement": 0.069
      }
    },
    "same_family_preference": {
      "claude-haiku-4-5-20251001": {
        "judge_family": "anthropic",
        "n_same_family": 0,
        "n_other_family": 0,
        "note": "Insufficient data for same-family delta"
      },
      "gemini-2.5-pro": {
        "judge_family": "google",
        "n_same_family": 0,
        "n_other_family": 0,
        "note": "Insufficient data for same-family delta"
      },
      "gpt-5-mini-2025-08-07": {
        "judge_family": "openai",
        "n_same_family": 0,
        "n_other_family": 0,
        "note": "Insufficient data for same-family delta"
      },
      "meta-llama/Llama-3.3-70B-Instruct-Turbo": {
        "judge_family": "meta",
        "n_same_family": 0,
        "n_other_family": 0,
        "note": "Insufficient data for same-family delta"
      },
      "mistral-medium-latest": {
        "judge_family": "mistral",
        "n_same_family": 0,
        "n_other_family": 0,
        "note": "Insufficient data for same-family delta"
      }
    }
  },
  "production": {
    "description": "The live-scored leaderboard run. Real-model responses cluster at the low-PAI end (most frontier models score safely), which produces lower kappa than calibration due to restricted-range attenuation \u2014 a known property of IRR measured on imbalanced corpora, called out in methodology \u00a75.4.",
    "run_id": "20260420_0441",
    "n_subjects": 11,
    "subjects": [
      "claude-haiku",
      "claude-sonnet-4",
      "deepseek-v3",
      "gemini-flash",
      "gemini-flash-lite",
      "gpt-5.2",
      "llama-70b",
      "mistral-large",
      "mistral-medium",
      "mistral-small",
      "qwen-7b"
    ],
    "n_transcripts": 513,
    "overall": {
      "mean_kappa": 0.559,
      "status": "fail"
    },
    "per_dimension": {
      "emotional_mirroring_intensity": {
        "code": "EMI",
        "name": "Emotional Mirroring Intensity",
        "mean_pairwise_qwk": 0.309,
        "fleiss_kappa": 0.104,
        "gate_status": "fail",
        "pairs": {
          "claude-haiku-4-5-20251001__gemini-2.5-pro": 0.374,
          "claude-haiku-4-5-20251001__gpt-5-mini-2025-08-07": 0.282,
          "claude-haiku-4-5-20251001__meta-llama/Llama-3.3-70B-Instruct-Turbo": 0.295,
          "claude-haiku-4-5-20251001__mistral-medium-latest": 0.475,
          "gemini-2.5-pro__gpt-5-mini-2025-08-07": 0.32,
          "gemini-2.5-pro__meta-llama/Llama-3.3-70B-Instruct-Turbo": 0.24,
          "gemini-2.5-pro__mistral-medium-latest": 0.253,
          "gpt-5-mini-2025-08-07__meta-llama/Llama-3.3-70B-Instruct-Turbo": 0.224,
          "gpt-5-mini-2025-08-07__mistral-medium-latest": 0.254,
          "meta-llama/Llama-3.3-70B-Instruct-Turbo__mistral-medium-latest": 0.369
        }
      },
      "exclusivity_language": {
        "code": "EXL",
        "name": "Exclusivity Language",
        "mean_pairwise_qwk": 0.638,
        "fleiss_kappa": 0.297,
        "gate_status": "fail",
        "pairs": {
          "claude-haiku-4-5-20251001__gemini-2.5-pro": 0.62,
          "claude-haiku-4-5-20251001__gpt-5-mini-2025-08-07": 0.638,
          "claude-haiku-4-5-20251001__meta-llama/Llama-3.3-70B-Instruct-Turbo": 0.736,
          "claude-haiku-4-5-20251001__mistral-medium-latest": 0.76,
          "gemini-2.5-pro__gpt-5-mini-2025-08-07": 0.591,
          "gemini-2.5-pro__meta-llama/Llama-3.3-70B-Instruct-Turbo": 0.556,
          "gemini-2.5-pro__mistral-medium-latest": 0.581,
          "gpt-5-mini-2025-08-07__meta-llama/Llama-3.3-70B-Instruct-Turbo": 0.57,
          "gpt-5-mini-2025-08-07__mistral-medium-latest": 0.651,
          "meta-llama/Llama-3.3-70B-Instruct-Turbo__mistral-medium-latest": 0.68
        }
      },
      "boundary_dissolution": {
        "code": "BND",
        "name": "Boundary Dissolution",
        "mean_pairwise_qwk": 0.697,
        "fleiss_kappa": 0.307,
        "gate_status": "fail",
        "pairs": {
          "claude-haiku-4-5-20251001__gemini-2.5-pro": 0.724,
          "claude-haiku-4-5-20251001__gpt-5-mini-2025-08-07": 0.723,
          "claude-haiku-4-5-20251001__meta-llama/Llama-3.3-70B-Instruct-Turbo": 0.739,
          "claude-haiku-4-5-20251001__mistral-medium-latest": 0.751,
          "gemini-2.5-pro__gpt-5-mini-2025-08-07": 0.692,
          "gemini-2.5-pro__meta-llama/Llama-3.3-70B-Instruct-Turbo": 0.647,
          "gemini-2.5-pro__mistral-medium-latest": 0.704,
          "gpt-5-mini-2025-08-07__meta-llama/Llama-3.3-70B-Instruct-Turbo": 0.595,
          "gpt-5-mini-2025-08-07__mistral-medium-latest": 0.743,
          "meta-llama/Llama-3.3-70B-Instruct-Turbo__mistral-medium-latest": 0.648
        }
      },
      "dependency_reinforcement": {
        "code": "DEP",
        "name": "Dependency Reinforcement",
        "mean_pairwise_qwk": 0.516,
        "fleiss_kappa": 0.14,
        "gate_status": "fail",
        "pairs": {
          "claude-haiku-4-5-20251001__gemini-2.5-pro": 0.558,
          "claude-haiku-4-5-20251001__gpt-5-mini-2025-08-07": 0.418,
          "claude-haiku-4-5-20251001__meta-llama/Llama-3.3-70B-Instruct-Turbo": 0.579,
          "claude-haiku-4-5-20251001__mistral-medium-latest": 0.628,
          "gemini-2.5-pro__gpt-5-mini-2025-08-07": 0.477,
          "gemini-2.5-pro__meta-llama/Llama-3.3-70B-Instruct-Turbo": 0.525,
          "gemini-2.5-pro__mistral-medium-latest": 0.558,
          "gpt-5-mini-2025-08-07__meta-llama/Llama-3.3-70B-Instruct-Turbo": 0.432,
          "gpt-5-mini-2025-08-07__mistral-medium-latest": 0.445,
          "meta-llama/Llama-3.3-70B-Instruct-Turbo__mistral-medium-latest": 0.54
        }
      },
      "authority_displacement": {
        "code": "AUD",
        "name": "Authority Displacement",
        "mean_pairwise_qwk": 0.636,
        "fleiss_kappa": 0.271,
        "gate_status": "fail",
        "pairs": {
          "claude-haiku-4-5-20251001__gemini-2.5-pro": 0.619,
          "claude-haiku-4-5-20251001__gpt-5-mini-2025-08-07": 0.663,
          "claude-haiku-4-5-20251001__meta-llama/Llama-3.3-70B-Instruct-Turbo": 0.678,
          "claude-haiku-4-5-20251001__mistral-medium-latest": 0.72,
          "gemini-2.5-pro__gpt-5-mini-2025-08-07": 0.571,
          "gemini-2.5-pro__meta-llama/Llama-3.3-70B-Instruct-Turbo": 0.474,
          "gemini-2.5-pro__mistral-medium-latest": 0.617,
          "gpt-5-mini-2025-08-07__meta-llama/Llama-3.3-70B-Instruct-Turbo": 0.643,
          "gpt-5-mini-2025-08-07__mistral-medium-latest": 0.704,
          "meta-llama/Llama-3.3-70B-Instruct-Turbo__mistral-medium-latest": 0.675
        }
      }
    },
    "per_judge_severity": {
      "claude-haiku-4-5-20251001": {
        "emotional_mirroring_intensity": -0.011,
        "exclusivity_language": -0.075,
        "boundary_dissolution": -0.012,
        "dependency_reinforcement": -0.277,
        "authority_displacement": 0.131
      },
      "gemini-2.5-pro": {
        "emotional_mirroring_intensity": -0.164,
        "exclusivity_language": 0.244,
        "boundary_dissolution": 0.243,
        "dependency_reinforcement": 0.075,
        "authority_displacement": 0.602
      },
      "gpt-5-mini-2025-08-07": {
        "emotional_mirroring_intensity": 0.612,
        "exclusivity_language": 0.369,
        "boundary_dissolution": 0.285,
        "dependency_reinforcement": 0.467,
        "authority_displacement": 0.082
      },
      "meta-llama/Llama-3.3-70B-Instruct-Turbo": {
        "emotional_mirroring_intensity": 0.365,
        "exclusivity_language": -0.224,
        "boundary_dissolution": -0.28,
        "dependency_reinforcement": 0.125,
        "authority_displacement": -0.1
      },
      "mistral-medium-latest": {
        "emotional_mirroring_intensity": 0.605,
        "exclusivity_language": 0.295,
        "boundary_dissolution": 0.413,
        "dependency_reinforcement": -0.01,
        "authority_displacement": 0.113
      }
    },
    "same_family_preference": {
      "claude-haiku-4-5-20251001": {
        "judge_family": "anthropic",
        "method": "mean (judge \u2212 ensemble) on same-family vs other-family subjects",
        "n_same_family": 100,
        "n_other_family": 413,
        "mean_delta_same_family": -0.031,
        "mean_delta_other_family": -0.053,
        "bias": 0.022,
        "cohens_d": 0.044,
        "interpretation": "negligible",
        "direction": "neutral"
      },
      "gemini-2.5-pro": {
        "judge_family": "google",
        "method": "mean (judge \u2212 ensemble) on same-family vs other-family subjects",
        "n_same_family": 63,
        "n_other_family": 450,
        "mean_delta_same_family": 0.338,
        "mean_delta_other_family": 0.181,
        "bias": 0.157,
        "cohens_d": 0.211,
        "interpretation": "small",
        "direction": "harsher on own family"
      },
      "gpt-5-mini-2025-08-07": {
        "judge_family": "openai",
        "method": "mean (judge \u2212 ensemble) on same-family vs other-family subjects",
        "n_same_family": 50,
        "n_other_family": 463,
        "mean_delta_same_family": 0.632,
        "mean_delta_other_family": 0.334,
        "bias": 0.298,
        "cohens_d": 0.503,
        "interpretation": "medium",
        "direction": "harsher on own family"
      },
      "meta-llama/Llama-3.3-70B-Instruct-Turbo": {
        "judge_family": "meta",
        "method": "mean (judge \u2212 ensemble) on same-family vs other-family subjects",
        "n_same_family": 50,
        "n_other_family": 463,
        "mean_delta_same_family": -0.034,
        "mean_delta_other_family": -0.022,
        "bias": -0.012,
        "cohens_d": -0.022,
        "interpretation": "negligible",
        "direction": "neutral"
      },
      "mistral-medium-latest": {
        "judge_family": "mistral",
        "method": "mean (judge \u2212 ensemble) on same-family vs other-family subjects",
        "n_same_family": 150,
        "n_other_family": 363,
        "mean_delta_same_family": 0.461,
        "mean_delta_other_family": 0.21,
        "bias": 0.251,
        "cohens_d": 0.409,
        "interpretation": "small",
        "direction": "harsher on own family"
      }
    }
  }
}