diff --git a/.github/workflows/apo_sim.yml b/.github/workflows/apo_sim.yml
index 1cbdc3f..31ee1cb 100644
--- a/.github/workflows/apo_sim.yml
+++ b/.github/workflows/apo_sim.yml
@@ -17,7 +17,8 @@ jobs:
strategy:
matrix:
script: [
- 'scripts/irm/irm_apo_coverage.py',
+ 'scripts/irm/apo.py',
+ 'scripts/irm/apos.py',
]
steps:
@@ -47,20 +48,27 @@ jobs:
with:
ref: ${{ env.TARGET_BRANCH }}
+ - name: Install uv
+ uses: astral-sh/setup-uv@v5
+ with:
+ version: "0.7.8"
+
- name: Set up Python
uses: actions/setup-python@v5
with:
- python-version: '3.12'
+ python-version-file: "monte-cover/pyproject.toml"
- - name: Install dependencies
+ - name: Install Monte-Cover
run: |
- python -m pip install --upgrade pip
- pip install -r requirements.txt
+ cd monte-cover
+ uv venv
+ uv sync
- name: Install DoubleML from correct branch
run: |
- pip uninstall -y doubleml
- pip install "doubleml @ git+https://github.com/DoubleML/doubleml-for-py@${{ env.DML_BRANCH }}"
+ source monte-cover/.venv/bin/activate
+ uv pip uninstall doubleml
+ uv pip install "doubleml @ git+https://github.com/DoubleML/doubleml-for-py@${{ env.DML_BRANCH }}"
- name: Set up Git configuration
run: |
@@ -68,7 +76,9 @@ jobs:
git config --global user.email 'github-actions@github.com'
- name: Run scripts
- run: python ${{ matrix.script }}
+ run: |
+ source monte-cover/.venv/bin/activate
+ uv run ${{ matrix.script }}
- name: Commit any existing changes
run: |
@@ -86,4 +96,4 @@ jobs:
git pull --rebase origin ${{ env.TARGET_BRANCH }}
git push origin ${{ env.TARGET_BRANCH }}
env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
\ No newline at end of file
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/did_sim.yml b/.github/workflows/did_sim.yml
index 199220d..65fc56e 100644
--- a/.github/workflows/did_sim.yml
+++ b/.github/workflows/did_sim.yml
@@ -20,6 +20,7 @@ jobs:
'scripts/did/did_pa_atte_coverage.py',
'scripts/did/did_cs_atte_coverage.py',
'scripts/did/did_pa_multi.py',
+ 'scripts/did/did_cs_multi.py',
]
steps:
@@ -52,7 +53,7 @@ jobs:
- name: Install uv
uses: astral-sh/setup-uv@v5
with:
- version: "0.6.11"
+ version: "0.7.8"
- name: Set up Python
uses: actions/setup-python@v5
diff --git a/.github/workflows/iivm_sim.yml b/.github/workflows/iivm_sim.yml
index 327962c..b7cb787 100644
--- a/.github/workflows/iivm_sim.yml
+++ b/.github/workflows/iivm_sim.yml
@@ -17,7 +17,7 @@ jobs:
strategy:
matrix:
script: [
- 'scripts/irm/iivm_late_coverage.py',
+ 'scripts/irm/iivm_late.py',
]
steps:
@@ -47,20 +47,27 @@ jobs:
with:
ref: ${{ env.TARGET_BRANCH }}
+ - name: Install uv
+ uses: astral-sh/setup-uv@v5
+ with:
+ version: "0.7.8"
+
- name: Set up Python
uses: actions/setup-python@v5
with:
- python-version: '3.12'
+ python-version-file: "monte-cover/pyproject.toml"
- - name: Install dependencies
+ - name: Install Monte-Cover
run: |
- python -m pip install --upgrade pip
- pip install -r requirements.txt
+ cd monte-cover
+ uv venv
+ uv sync
- name: Install DoubleML from correct branch
run: |
- pip uninstall -y doubleml
- pip install "doubleml @ git+https://github.com/DoubleML/doubleml-for-py@${{ env.DML_BRANCH }}"
+ source monte-cover/.venv/bin/activate
+ uv pip uninstall doubleml
+ uv pip install "doubleml @ git+https://github.com/DoubleML/doubleml-for-py@${{ env.DML_BRANCH }}"
- name: Set up Git configuration
run: |
@@ -68,7 +75,9 @@ jobs:
git config --global user.email 'github-actions@github.com'
- name: Run scripts
- run: python ${{ matrix.script }}
+ run: |
+ source monte-cover/.venv/bin/activate
+ uv run ${{ matrix.script }}
- name: Commit any existing changes
run: |
@@ -86,4 +95,4 @@ jobs:
git pull --rebase origin ${{ env.TARGET_BRANCH }}
git push origin ${{ env.TARGET_BRANCH }}
env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
\ No newline at end of file
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/irm_sim.yml b/.github/workflows/irm_sim.yml
index 63f5f55..5d26a1b 100644
--- a/.github/workflows/irm_sim.yml
+++ b/.github/workflows/irm_sim.yml
@@ -17,10 +17,10 @@ jobs:
strategy:
matrix:
script: [
- 'scripts/irm/irm_ate_coverage.py',
- 'scripts/irm/irm_atte_coverage.py',
- 'scripts/irm/irm_cate_coverage.py',
- 'scripts/irm/irm_gate_coverage.py',
+ 'scripts/irm/irm_ate.py',
+ 'scripts/irm/irm_atte.py',
+ 'scripts/irm/irm_cate.py',
+ 'scripts/irm/irm_gate.py',
'scripts/irm/irm_ate_sensitivity.py',
'scripts/irm/irm_atte_sensitivity.py',
]
@@ -52,20 +52,27 @@ jobs:
with:
ref: ${{ env.TARGET_BRANCH }}
+ - name: Install uv
+ uses: astral-sh/setup-uv@v5
+ with:
+ version: "0.7.8"
+
- name: Set up Python
uses: actions/setup-python@v5
with:
- python-version: '3.12'
+ python-version-file: "monte-cover/pyproject.toml"
- - name: Install dependencies
+ - name: Install Monte-Cover
run: |
- python -m pip install --upgrade pip
- pip install -r requirements.txt
+ cd monte-cover
+ uv venv
+ uv sync
- name: Install DoubleML from correct branch
run: |
- pip uninstall -y doubleml
- pip install "doubleml @ git+https://github.com/DoubleML/doubleml-for-py@${{ env.DML_BRANCH }}"
+ source monte-cover/.venv/bin/activate
+ uv pip uninstall doubleml
+ uv pip install "doubleml @ git+https://github.com/DoubleML/doubleml-for-py@${{ env.DML_BRANCH }}"
- name: Set up Git configuration
run: |
@@ -73,7 +80,9 @@ jobs:
git config --global user.email 'github-actions@github.com'
- name: Run scripts
- run: python ${{ matrix.script }}
+ run: |
+ source monte-cover/.venv/bin/activate
+ uv run ${{ matrix.script }}
- name: Commit any existing changes
run: |
@@ -91,4 +100,4 @@ jobs:
git pull --rebase origin ${{ env.TARGET_BRANCH }}
git push origin ${{ env.TARGET_BRANCH }}
env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
\ No newline at end of file
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/pliv_sim.yml b/.github/workflows/pliv_sim.yml
index 9c6921e..22a91bc 100644
--- a/.github/workflows/pliv_sim.yml
+++ b/.github/workflows/pliv_sim.yml
@@ -17,7 +17,7 @@ jobs:
strategy:
matrix:
script: [
- 'scripts/plm/pliv_late_coverage.py',
+ 'scripts/plm/pliv_late.py',
]
steps:
@@ -47,20 +47,27 @@ jobs:
with:
ref: ${{ env.TARGET_BRANCH }}
+ - name: Install uv
+ uses: astral-sh/setup-uv@v5
+ with:
+ version: "0.7.8"
+
- name: Set up Python
uses: actions/setup-python@v5
with:
- python-version: '3.12'
+ python-version-file: "monte-cover/pyproject.toml"
- - name: Install dependencies
+ - name: Install Monte-Cover
run: |
- python -m pip install --upgrade pip
- pip install -r requirements.txt
-
+ cd monte-cover
+ uv venv
+ uv sync
+
- name: Install DoubleML from correct branch
run: |
- pip uninstall -y doubleml
- pip install "doubleml @ git+https://github.com/DoubleML/doubleml-for-py@${{ env.DML_BRANCH }}"
+ source monte-cover/.venv/bin/activate
+ uv pip uninstall doubleml
+ uv pip install "doubleml @ git+https://github.com/DoubleML/doubleml-for-py@${{ env.DML_BRANCH }}"
- name: Set up Git configuration
run: |
@@ -68,7 +75,9 @@ jobs:
git config --global user.email 'github-actions@github.com'
- name: Run scripts
- run: python ${{ matrix.script }}
+ run: |
+ source monte-cover/.venv/bin/activate
+ uv run ${{ matrix.script }}
- name: Commit any existing changes
run: |
@@ -86,4 +95,4 @@ jobs:
git pull --rebase origin ${{ env.TARGET_BRANCH }}
git push origin ${{ env.TARGET_BRANCH }}
env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
\ No newline at end of file
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/plr_sim.yml b/.github/workflows/plr_sim.yml
index 973a5b4..20c61bf 100644
--- a/.github/workflows/plr_sim.yml
+++ b/.github/workflows/plr_sim.yml
@@ -17,10 +17,10 @@ jobs:
strategy:
matrix:
script: [
- 'scripts/plm/plr_ate_coverage.py',
+ 'scripts/plm/plr_ate.py',
'scripts/plm/plr_ate_sensitivity.py',
- 'scripts/plm/plr_cate_coverage.py',
- 'scripts/plm/plr_gate_coverage.py',
+ 'scripts/plm/plr_cate.py',
+ 'scripts/plm/plr_gate.py',
]
steps:
@@ -53,8 +53,8 @@ jobs:
- name: Install uv
uses: astral-sh/setup-uv@v5
with:
- version: "0.6.11"
-
+ version: "0.7.8"
+
- name: Set up Python
uses: actions/setup-python@v5
with:
@@ -98,4 +98,4 @@ jobs:
git pull --rebase origin ${{ env.TARGET_BRANCH }}
git push origin ${{ env.TARGET_BRANCH }}
env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
\ No newline at end of file
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/quant_sim.yml b/.github/workflows/quant_sim.yml
index af1ef7f..8304a93 100644
--- a/.github/workflows/quant_sim.yml
+++ b/.github/workflows/quant_sim.yml
@@ -17,9 +17,9 @@ jobs:
strategy:
matrix:
script: [
- 'scripts/irm/cvar_coverage.py',
- 'scripts/irm/pq_coverage.py',
- 'scripts/irm/lpq_coverage.py',
+ 'scripts/irm/cvar.py',
+ 'scripts/irm/pq.py',
+ 'scripts/irm/lpq.py',
]
steps:
@@ -49,20 +49,27 @@ jobs:
with:
ref: ${{ env.TARGET_BRANCH }}
+ - name: Install uv
+ uses: astral-sh/setup-uv@v5
+ with:
+ version: "0.7.8"
+
- name: Set up Python
uses: actions/setup-python@v5
with:
- python-version: '3.12'
+ python-version-file: "monte-cover/pyproject.toml"
- - name: Install dependencies
+ - name: Install Monte-Cover
run: |
- python -m pip install --upgrade pip
- pip install -r requirements.txt
+ cd monte-cover
+ uv venv
+ uv sync
- name: Install DoubleML from correct branch
run: |
- pip uninstall -y doubleml
- pip install "doubleml @ git+https://github.com/DoubleML/doubleml-for-py@${{ env.DML_BRANCH }}"
+ source monte-cover/.venv/bin/activate
+ uv pip uninstall doubleml
+ uv pip install "doubleml @ git+https://github.com/DoubleML/doubleml-for-py@${{ env.DML_BRANCH }}"
- name: Set up Git configuration
run: |
@@ -70,7 +77,9 @@ jobs:
git config --global user.email 'github-actions@github.com'
- name: Run scripts
- run: python ${{ matrix.script }}
+ run: |
+ source monte-cover/.venv/bin/activate
+ uv run ${{ matrix.script }}
- name: Commit any existing changes
run: |
@@ -88,4 +97,4 @@ jobs:
git pull --rebase origin ${{ env.TARGET_BRANCH }}
git push origin ${{ env.TARGET_BRANCH }}
env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
\ No newline at end of file
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/rdd_sim.yml b/.github/workflows/rdd_sim.yml
index 533a6c6..ea490ee 100644
--- a/.github/workflows/rdd_sim.yml
+++ b/.github/workflows/rdd_sim.yml
@@ -17,8 +17,8 @@ jobs:
strategy:
matrix:
script: [
- 'scripts/rdd/rdd_sharp_coverage.py',
- 'scripts/rdd/rdd_fuzzy_coverage.py',
+ 'scripts/rdd/rdd_sharp.py',
+ 'scripts/rdd/rdd_fuzzy.py',
]
steps:
@@ -48,26 +48,32 @@ jobs:
with:
ref: ${{ env.TARGET_BRANCH }}
+ - name: Install uv
+ uses: astral-sh/setup-uv@v5
+ with:
+ version: "0.7.8"
+
- name: Set up Python
uses: actions/setup-python@v5
with:
- python-version: '3.12'
+ python-version-file: "monte-cover/pyproject.toml"
- - name: Install dependencies
+ - name: Install Monte-Cover
run: |
- python -m pip install --upgrade pip
- pip install -r requirements.txt
+ cd monte-cover
+ uv venv
+ uv sync
- - name: Install DoubleML from correct branch
- run: |
- pip uninstall -y doubleml
- pip install "doubleml[rdd] @ git+https://github.com/DoubleML/doubleml-for-py@${{ env.DML_BRANCH }}"
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: '3.12'
- - name: Install RDFlex from main branch
+ - name: Install DoubleML from correct branch
run: |
- pip uninstall -y doubleml
- pip install git+https://github.com/DoubleML/doubleml-rdflex.git@main
- pip install rdrobust
+ source monte-cover/.venv/bin/activate
+ uv pip uninstall doubleml
+ uv pip install "doubleml[rdd] @ git+https://github.com/DoubleML/doubleml-for-py@${{ env.DML_BRANCH }}"
- name: Set up Git configuration
run: |
@@ -75,7 +81,9 @@ jobs:
git config --global user.email 'github-actions@github.com'
- name: Run scripts
- run: python ${{ matrix.script }}
+ run: |
+ source monte-cover/.venv/bin/activate
+ uv run ${{ matrix.script }}
- name: Commit any existing changes
run: |
@@ -93,4 +101,4 @@ jobs:
git pull --rebase origin ${{ env.TARGET_BRANCH }}
git push origin ${{ env.TARGET_BRANCH }}
env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
\ No newline at end of file
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/ssm_sim.yml b/.github/workflows/ssm_sim.yml
index e5a7474..071a556 100644
--- a/.github/workflows/ssm_sim.yml
+++ b/.github/workflows/ssm_sim.yml
@@ -17,8 +17,8 @@ jobs:
strategy:
matrix:
script: [
- 'scripts/irm/ssm_mar_ate_coverage.py',
- 'scripts/irm/ssm_nonignorable_ate_coverage.py',
+ 'scripts/ssm/ssm_mar_ate.py',
+ 'scripts/ssm/ssm_nonig_ate.py',
]
steps:
@@ -48,20 +48,27 @@ jobs:
with:
ref: ${{ env.TARGET_BRANCH }}
+ - name: Install uv
+ uses: astral-sh/setup-uv@v5
+ with:
+ version: "0.7.8"
+
- name: Set up Python
uses: actions/setup-python@v5
with:
- python-version: '3.12'
+ python-version-file: "monte-cover/pyproject.toml"
- - name: Install dependencies
+ - name: Install Monte-Cover
run: |
- python -m pip install --upgrade pip
- pip install -r requirements.txt
+ cd monte-cover
+ uv venv
+ uv sync
- name: Install DoubleML from correct branch
run: |
- pip uninstall -y doubleml
- pip install "doubleml @ git+https://github.com/DoubleML/doubleml-for-py@${{ env.DML_BRANCH }}"
+ source monte-cover/.venv/bin/activate
+ uv pip uninstall doubleml
+ uv pip install "doubleml @ git+https://github.com/DoubleML/doubleml-for-py@${{ env.DML_BRANCH }}"
- name: Set up Git configuration
run: |
@@ -69,11 +76,13 @@ jobs:
git config --global user.email 'github-actions@github.com'
- name: Run scripts
- run: python ${{ matrix.script }}
+ run: |
+ source monte-cover/.venv/bin/activate
+ uv run ${{ matrix.script }}
- name: Commit any existing changes
run: |
- git add results/irm
+ git add results/ssm
git commit -m "Update results from script: ${{ matrix.script }}" || echo "No changed results to commit"
- name: Wait random time
@@ -87,4 +96,4 @@ jobs:
git pull --rebase origin ${{ env.TARGET_BRANCH }}
git push origin ${{ env.TARGET_BRANCH }}
env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
\ No newline at end of file
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.gitignore b/.gitignore
index 628f882..24f7c5c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,4 +2,4 @@ __pycache__/
# Logs
monte-cover/logs/
-*.log
\ No newline at end of file
+*.log
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..73b4dc1
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,24 @@
+repos:
+- repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v5.0.0
+ hooks:
+ # File format checks
+ - id: check-yaml
+ exclude: ^results/
+ - id: check-toml
+ # Code quality checks
+ - id: debug-statements
+ - id: check-added-large-files
+ # File formatting fixes
+ - id: mixed-line-ending
+ - id: trailing-whitespace
+ - id: end-of-file-fixer
+- repo: https://github.com/psf/black
+ rev: 25.1.0
+ hooks:
+ - id: black
+- repo: https://github.com/astral-sh/ruff-pre-commit
+ rev: v0.11.7
+ hooks:
+ - id: ruff
+ args: ["--fix", "--output-format=full"]
diff --git a/doc/.gitignore b/doc/.gitignore
index 67f9f12..47c274c 100644
--- a/doc/.gitignore
+++ b/doc/.gitignore
@@ -1,2 +1,2 @@
/.quarto/
-/_site/
\ No newline at end of file
+/_site/
diff --git a/doc/_quarto-dev.yml b/doc/_quarto-dev.yml
index 1cebe4e..5c3587a 100644
--- a/doc/_quarto-dev.yml
+++ b/doc/_quarto-dev.yml
@@ -5,7 +5,8 @@ project:
metadata-files:
- _website.yml
-website:
+website:
+ site-url: https://docs.doubleml.org/doubleml-coverage/dev/
drafts:
- index.qmd
# IRM
@@ -14,6 +15,7 @@ website:
- irm/irm_cate.qmd
- irm/apo.qmd
- irm/qte.qmd
+ - irm/iivm.qmd
# PLM
- plm/plr.qmd
- plm/plr_gate.qmd
@@ -27,4 +29,4 @@ website:
- ssm/ssm_nonignorable.qmd
# RDD
- rdd/rdd.qmd
- draft-mode: visible
\ No newline at end of file
+ draft-mode: visible
diff --git a/doc/_quarto.yml b/doc/_quarto.yml
index 4d261c1..69fe054 100644
--- a/doc/_quarto.yml
+++ b/doc/_quarto.yml
@@ -3,4 +3,4 @@ project:
output-dir: _site
metadata-files:
- - _website.yml
\ No newline at end of file
+ - _website.yml
diff --git a/doc/_website.yml b/doc/_website.yml
index 4beb151..4bf06b8 100644
--- a/doc/_website.yml
+++ b/doc/_website.yml
@@ -1,6 +1,7 @@
website:
title: "DoubleML Coverage"
favicon: _static/favicon.ico
+ site-url: https://docs.doubleml.org/doubleml-coverage/
search:
location: sidebar
sidebar:
@@ -17,6 +18,7 @@ website:
- irm/irm_cate.qmd
- irm/apo.qmd
- irm/qte.qmd
+ - irm/iivm.qmd
- text: "PLM"
menu:
- plm/plr.qmd
@@ -25,7 +27,8 @@ website:
- plm/pliv.qmd
- text: "DID"
menu:
- - did/did_multi.qmd
+ - did/did_pa_multi.qmd
+ - did/did_cs_multi.qmd
- did/did_pa.qmd
- did/did_cs.qmd
- text: "SSM"
diff --git a/doc/did/did_cs.qmd b/doc/did/did_cs.qmd
index c6ce684..eab72ec 100644
--- a/doc/did/did_cs.qmd
+++ b/doc/did/did_cs.qmd
@@ -9,55 +9,17 @@ jupyter: python3
import numpy as np
import pandas as pd
-from itables import init_notebook_mode, show, options
+from itables import init_notebook_mode
+import os
+import sys
-init_notebook_mode(all_interactive=True)
+doc_dir = os.path.abspath(os.path.join(os.getcwd(), ".."))
+if doc_dir not in sys.path:
+ sys.path.append(doc_dir)
+
+from utils.style_tables import generate_and_show_styled_table
-def highlight_range(s, level=0.95, dist=0.05, props=''):
- color_grid = np.where((s >= level-dist) &
- (s <= level+dist), props, '')
- return color_grid
-
-
-def color_coverage(df, level):
- # color coverage column order is important
- styled_df = df.apply(
- highlight_range,
- level=level,
- dist=1.0,
- props='color:black;background-color:red',
- subset=["Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.1,
- props='color:black;background-color:yellow',
- subset=["Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.05,
- props='color:white;background-color:darkgreen',
- subset=["Coverage"])
-
- # set all coverage values to bold
- styled_df = styled_df.set_properties(
- **{'font-weight': 'bold'},
- subset=["Coverage"])
- return styled_df
-
-
-def make_pretty(df, level, n_rep):
- styled_df = df.style.hide(axis="index")
- # Format only float columns
- float_cols = df.select_dtypes(include=['float']).columns
- styled_df = styled_df.format({col: "{:.3f}" for col in float_cols})
-
- # color coverage column order is important
- styled_df = color_coverage(styled_df, level)
- caption = f"Coverage for {level*100}%-Confidence Interval over {n_rep} Repetitions"
-
- return show(styled_df, caption=caption, allow_html=True)
+init_notebook_mode(all_interactive=True)
```
## ATTE Coverage
@@ -87,45 +49,57 @@ n_rep = df["repetition"].unique()[0]
display_columns = ["Learner g", "Learner m", "DGP", "In-sample-norm.", "Bias", "CI Length", "Coverage"]
```
-### Observational Score
+### Observational Score
```{python}
#| echo: false
-score = "observational"
-level = 0.95
-df_ate_95 = df[(df['level'] == level) & (df["Score"] == score)][display_columns]
-make_pretty(df_ate_95, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df,
+ filters={"level": 0.95, "Score": "observational"},
+ display_cols=display_columns,
+ n_rep=n_rep,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
```
```{python}
#| echo: false
-score = "observational"
-level = 0.9
-
-df_ate_9 = df[(df['level'] == level) & (df["Score"] == score)][display_columns]
-make_pretty(df_ate_9, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df,
+ filters={"level": 0.9, "Score": "observational"},
+ display_cols=display_columns,
+ n_rep=n_rep,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
```
-### Experimental Score
+### Experimental Score
Remark that the only two valid DGPs are DGP $5$ and DGP $6$. All other DGPs are invalid due to non-experimental treatment assignment.
```{python}
#| echo: false
-score = "experimental"
-level = 0.95
-
-df_ate_95 = df[(df['level'] == level) & (df["Score"] == score)][display_columns]
-make_pretty(df_ate_95, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df,
+ filters={"level": 0.95, "Score": "experimental"},
+ display_cols=display_columns,
+ n_rep=n_rep,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
```
```{python}
#| echo: false
-score = "experimental"
-level = 0.9
-
-df_ate_9 = df[(df['level'] == level) & (df["Score"] == score)][display_columns]
-make_pretty(df_ate_9, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df,
+ filters={"level": 0.9, "Score": "experimental"},
+ display_cols=display_columns,
+ n_rep=n_rep,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
```
-
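The `.qmd` changes above replace the per-document `highlight_range`/`color_coverage`/`make_pretty` helpers with a shared `generate_and_show_styled_table` from `doc/utils/style_tables.py`, which is not part of this diff. A minimal sketch of what that helper presumably wraps, reconstructed from the removed inline code (the exact signature and defaults are assumptions):

```python
# Hypothetical sketch of doc/utils/style_tables.py; the real module is not
# shown in this diff. Reconstructed from the inline helpers removed above.
import numpy as np
import pandas as pd
from itables import show


def _highlight_range(s, level=0.95, dist=0.05, props=""):
    # Mark cells whose value lies within +/- dist of the nominal level.
    return np.where((s >= level - dist) & (s <= level + dist), props, "")


def generate_and_show_styled_table(
    main_df, filters, display_cols, n_rep,
    level_col="level", coverage_highlight_cols=("Coverage",),
):
    # Filter rows to the requested level/score combination.
    mask = pd.Series(True, index=main_df.index)
    for col, val in filters.items():
        mask &= main_df[col] == val
    df = main_df[mask][display_cols]
    level = filters[level_col]

    styled_df = df.style.hide(axis="index")
    float_cols = df.select_dtypes(include=["float"]).columns
    styled_df = styled_df.format({col: "{:.3f}" for col in float_cols})

    # Color tiers, broadest first so later calls overwrite earlier ones:
    # red anywhere, yellow within 0.1, dark green within 0.05.
    subset = list(coverage_highlight_cols)
    for dist, props in [
        (1.0, "color:black;background-color:red"),
        (0.1, "color:black;background-color:yellow"),
        (0.05, "color:white;background-color:darkgreen"),
    ]:
        styled_df = styled_df.apply(
            _highlight_range, level=level, dist=dist, props=props, subset=subset
        )
    styled_df = styled_df.set_properties(**{"font-weight": "bold"}, subset=subset)

    caption = f"Coverage for {level*100}%-Confidence Interval over {n_rep} Repetitions"
    return show(styled_df, caption=caption, allow_html=True)
```

Centralizing the styling also lets the multi-period documents highlight both `"Coverage"` and `"Uniform Coverage"` through the same code path, instead of maintaining two diverging copies of `color_coverage`.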
diff --git a/doc/did/did_cs_multi.qmd b/doc/did/did_cs_multi.qmd
new file mode 100644
index 0000000..fba42d5
--- /dev/null
+++ b/doc/did/did_cs_multi.qmd
@@ -0,0 +1,322 @@
+---
+title: "DiD for Cross-Sectional Data over Multiple Periods"
+
+jupyter: python3
+---
+
+```{python}
+#| echo: false
+
+import numpy as np
+import pandas as pd
+from itables import init_notebook_mode
+import os
+import sys
+
+doc_dir = os.path.abspath(os.path.join(os.getcwd(), ".."))
+if doc_dir not in sys.path:
+ sys.path.append(doc_dir)
+
+from utils.style_tables import generate_and_show_styled_table
+
+init_notebook_mode(all_interactive=True)
+```
+
+## ATTE Coverage
+
+The simulations are based on the [make_did_cs_CS2021](https://docs.doubleml.org/dev/api/generated/doubleml.did.datasets.make_did_cs_CS2021.html)-DGP with $2000$ observations. Both learners are set to either boosting or a linear (logistic) model. Due to time constraints, we only consider the following DGPs:
+
+ - Type 1: Linear outcome model and treatment assignment
+ - Type 4: Nonlinear outcome model and treatment assignment
+ - Type 6: Randomized treatment assignment and nonlinear outcome model
+
+The non-uniform results (coverage, CI length and bias) refer to averaged values over all $ATTs$ (point-wise confidence intervals).
+
+::: {.callout-note title="Metadata" collapse="true"}
+
+```{python}
+#| echo: false
+metadata_file = '../../results/did/did_cs_multi_metadata.csv'
+metadata_df = pd.read_csv(metadata_file)
+print(metadata_df.T.to_string(header=False))
+```
+
+:::
+
+```{python}
+#| echo: false
+
+# set up data
+df = pd.read_csv("../../results/did/did_cs_multi_detailed.csv", index_col=None)
+
+assert df["repetition"].nunique() == 1
+n_rep = df["repetition"].unique()[0]
+
+display_columns = ["Learner g", "Learner m", "DGP", "In-sample-norm.", "Bias", "CI Length", "Coverage", "Uniform CI Length", "Uniform Coverage"]
+```
+
+### Observational Score
+
+```{python}
+#| echo: false
+generate_and_show_styled_table(
+ main_df=df,
+ filters={"level": 0.95, "Score": "observational"},
+ display_cols=display_columns,
+ n_rep=n_rep,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
+
+```{python}
+#| echo: false
+generate_and_show_styled_table(
+ main_df=df,
+ filters={"level": 0.9, "Score": "observational"},
+ display_cols=display_columns,
+ n_rep=n_rep,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
+
+
+### Experimental Score
+
+The results are only valid for DGP 6, as the experimental score assumes a randomized treatment assignment.
+
+```{python}
+#| echo: false
+generate_and_show_styled_table(
+ main_df=df,
+ filters={"level": 0.95, "Score": "experimental"},
+ display_cols=display_columns,
+ n_rep=n_rep,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
+
+```{python}
+#| echo: false
+generate_and_show_styled_table(
+ main_df=df,
+ filters={"level": 0.9, "Score": "experimental"},
+ display_cols=display_columns,
+ n_rep=n_rep,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
+
+## Aggregated Effects
+
+These simulations test different types of aggregation, as described in the [DiD User Guide](https://docs.doubleml.org/dev/guide/models.html#difference-in-differences-models-did).
+
+The non-uniform results (coverage, CI length and bias) refer to averaged values over all $ATTs$ (point-wise confidence intervals).
+
+### Group Effects
+
+```{python}
+#| echo: false
+
+# set up data
+df_group = pd.read_csv("../../results/did/did_cs_multi_group.csv", index_col=None)
+
+assert df_group["repetition"].nunique() == 1
+n_rep_group = df_group["repetition"].unique()[0]
+
+display_columns = ["Learner g", "Learner m", "DGP", "In-sample-norm.", "Bias", "CI Length", "Coverage", "Uniform CI Length", "Uniform Coverage"]
+```
+
+#### Observational Score
+
+```{python}
+#| echo: false
+generate_and_show_styled_table(
+ main_df=df_group,
+ filters={"level": 0.95, "Score": "observational"},
+ display_cols=display_columns,
+ n_rep=n_rep_group,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
+
+```{python}
+#| echo: false
+generate_and_show_styled_table(
+ main_df=df_group,
+ filters={"level": 0.9, "Score": "observational"},
+ display_cols=display_columns,
+ n_rep=n_rep_group,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
+
+#### Experimental Score
+
+The results are only valid for DGP 6, as the experimental score assumes a randomized treatment assignment.
+
+```{python}
+#| echo: false
+generate_and_show_styled_table(
+ main_df=df_group,
+ filters={"level": 0.95, "Score": "experimental"},
+ display_cols=display_columns,
+ n_rep=n_rep_group,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
+
+```{python}
+#| echo: false
+generate_and_show_styled_table(
+ main_df=df_group,
+ filters={"level": 0.9, "Score": "experimental"},
+ display_cols=display_columns,
+ n_rep=n_rep_group,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
+
+### Time Effects
+
+```{python}
+#| echo: false
+
+# set up data
+df_time = pd.read_csv("../../results/did/did_cs_multi_time.csv", index_col=None)
+
+assert df_time["repetition"].nunique() == 1
+n_rep_time = df_time["repetition"].unique()[0]
+
+display_columns = ["Learner g", "Learner m", "DGP", "In-sample-norm.", "Bias", "CI Length", "Coverage", "Uniform CI Length", "Uniform Coverage"]
+```
+
+#### Observational Score
+
+```{python}
+#| echo: false
+generate_and_show_styled_table(
+ main_df=df_time,
+ filters={"level": 0.95, "Score": "observational"},
+ display_cols=display_columns,
+ n_rep=n_rep_time,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
+
+```{python}
+#| echo: false
+generate_and_show_styled_table(
+ main_df=df_time,
+ filters={"level": 0.9, "Score": "observational"},
+ display_cols=display_columns,
+ n_rep=n_rep_time,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
+
+#### Experimental Score
+
+The results are only valid for DGP 6, as the experimental score assumes a randomized treatment assignment.
+
+```{python}
+#| echo: false
+generate_and_show_styled_table(
+ main_df=df_time,
+ filters={"level": 0.95, "Score": "experimental"},
+ display_cols=display_columns,
+ n_rep=n_rep_time,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
+
+```{python}
+#| echo: false
+generate_and_show_styled_table(
+ main_df=df_time,
+ filters={"level": 0.9, "Score": "experimental"},
+ display_cols=display_columns,
+ n_rep=n_rep_time,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
+
+### Event Study Aggregation
+
+```{python}
+#| echo: false
+
+# set up data
+df_es = pd.read_csv("../../results/did/did_cs_multi_eventstudy.csv", index_col=None)
+
+assert df_es["repetition"].nunique() == 1
+n_rep_es = df_es["repetition"].unique()[0]
+
+display_columns = ["Learner g", "Learner m", "DGP", "In-sample-norm.", "Bias", "CI Length", "Coverage", "Uniform CI Length", "Uniform Coverage"]
+```
+
+#### Observational Score
+
+```{python}
+#| echo: false
+generate_and_show_styled_table(
+ main_df=df_es,
+ filters={"level": 0.95, "Score": "observational"},
+ display_cols=display_columns,
+ n_rep=n_rep_es,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
+
+```{python}
+#| echo: false
+generate_and_show_styled_table(
+ main_df=df_es,
+ filters={"level": 0.9, "Score": "observational"},
+ display_cols=display_columns,
+ n_rep=n_rep_es,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
+
+#### Experimental Score
+
+The results are only valid for DGP 6, as the experimental score assumes a randomized treatment assignment.
+
+
+```{python}
+#| echo: false
+generate_and_show_styled_table(
+ main_df=df_es,
+ filters={"level": 0.95, "Score": "experimental"},
+ display_cols=display_columns,
+ n_rep=n_rep_es,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
+
+```{python}
+#| echo: false
+generate_and_show_styled_table(
+ main_df=df_es,
+ filters={"level": 0.9, "Score": "experimental"},
+ display_cols=display_columns,
+ n_rep=n_rep_es,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
diff --git a/doc/did/did_multi.qmd b/doc/did/did_multi.qmd
deleted file mode 100644
index 57d4b3c..0000000
--- a/doc/did/did_multi.qmd
+++ /dev/null
@@ -1,312 +0,0 @@
----
-title: "DiD for Panel Data over Multiple Periods"
-
-jupyter: python3
----
-
-```{python}
-#| echo: false
-
-import numpy as np
-import pandas as pd
-from itables import init_notebook_mode, show, options
-
-init_notebook_mode(all_interactive=True)
-
-def highlight_range(s, level=0.95, dist=0.05, props=''):
- color_grid = np.where((s >= level-dist) &
- (s <= level+dist), props, '')
- return color_grid
-
-
-def color_coverage(df, level):
- # color coverage column order is important
- styled_df = df.apply(
- highlight_range,
- level=level,
- dist=1.0,
- props='color:black;background-color:red',
- subset=["Coverage", "Uniform Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.1,
- props='color:black;background-color:yellow',
- subset=["Coverage", "Uniform Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.05,
- props='color:white;background-color:darkgreen',
- subset=["Coverage", "Uniform Coverage"])
-
- # set all coverage values to bold
- styled_df = styled_df.set_properties(
- **{'font-weight': 'bold'},
- subset=["Coverage", "Uniform Coverage"])
- return styled_df
-
-
-def make_pretty(df, level, n_rep):
- styled_df = df.style.hide(axis="index")
- # Format only float columns
- float_cols = df.select_dtypes(include=['float']).columns
- styled_df = styled_df.format({col: "{:.3f}" for col in float_cols})
-
- # color coverage column order is important
- styled_df = color_coverage(styled_df, level)
- caption = f"Coverage for {level*100}%-Confidence Interval over {n_rep} Repetitions"
-
- return show(styled_df, caption=caption, allow_html=True)
-```
-
-## ATTE Coverage
-
-The simulations are based on the the [make_did_CS2021](https://docs.doubleml.org/dev/api/generated/doubleml.did.datasets.make_did_CS2021.html)-DGP with $2000$ observations. Learners are both set to either boosting or a linear (logistic) model. Due to time constraints we only consider the following DGPs:
-
- - Type 1: Linear outcome model and treatment assignment
- - Type 4: Nonlinear outcome model and treatment assignment
- - Type 6: Randomized treatment assignment and nonlinear outcome model
-
-The non-uniform results (coverage, ci length and bias) refer to averaged values over all $ATTs$ (point-wise confidende intervals).
-
-::: {.callout-note title="Metadata" collapse="true"}
-
-```{python}
-#| echo: false
-metadata_file = '../../results/did/did_multi_metadata.csv'
-metadata_df = pd.read_csv(metadata_file)
-print(metadata_df.T.to_string(header=False))
-```
-
-:::
-
-```{python}
-#| echo: false
-
-# set up data
-df = pd.read_csv("../../results/did/did_multi_detailed.csv", index_col=None)
-
-assert df["repetition"].nunique() == 1
-n_rep = df["repetition"].unique()[0]
-
-display_columns = ["Learner g", "Learner m", "DGP", "In-sample-norm.", "Bias", "CI Length", "Coverage", "Uniform CI Length", "Uniform Coverage"]
-```
-
-### Observational Score
-
-```{python}
-#| echo: false
-score = "observational"
-level = 0.95
-
-df_ate_95 = df[(df['level'] == level) & (df["Score"] == score)][display_columns]
-make_pretty(df_ate_95, level, n_rep)
-```
-
-```{python}
-#| echo: false
-score = "observational"
-level = 0.9
-
-df_ate_9 = df[(df['level'] == level) & (df["Score"] == score)][display_columns]
-make_pretty(df_ate_9, level, n_rep)
-```
-
-
-### Experimental Score
-
-The results are only valid for the DGP 6, as the experimental score assumes a randomized treatment assignment.
-
-```{python}
-#| echo: false
-score = "experimental"
-level = 0.95
-
-df_ate_95 = df[(df['level'] == level) & (df["Score"] == score)][display_columns]
-make_pretty(df_ate_95, level, n_rep)
-```
-
-```{python}
-#| echo: false
-score = "experimental"
-level = 0.9
-
-df_ate_9 = df[(df['level'] == level) & (df["Score"] == score)][display_columns]
-make_pretty(df_ate_9, level, n_rep)
-```
-
-## Aggregated Effects
-
-These simulations test different types of aggregation, as described in [DiD User Guide](https://docs.doubleml.org/dev/guide/models.html#difference-in-differences-models-did).
-
-The non-uniform results (coverage, ci length and bias) refer to averaged values over all $ATTs$ (point-wise confidende intervals).
-
-### Group Effects
-
-```{python}
-#| echo: false
-
-# set up data
-df = pd.read_csv("../../results/did/did_multi_group.csv", index_col=None)
-
-assert df["repetition"].nunique() == 1
-n_rep = df["repetition"].unique()[0]
-
-display_columns = ["Learner g", "Learner m", "DGP", "In-sample-norm.", "Bias", "CI Length", "Coverage", "Uniform CI Length", "Uniform Coverage"]
-```
-
-#### Observational Score
-
-```{python}
-#| echo: false
-score = "observational"
-level = 0.95
-
-df_ate_95 = df[(df['level'] == level) & (df["Score"] == score)][display_columns]
-make_pretty(df_ate_95, level, n_rep)
-```
-
-```{python}
-#| echo: false
-score = "observational"
-level = 0.9
-
-df_ate_9 = df[(df['level'] == level) & (df["Score"] == score)][display_columns]
-make_pretty(df_ate_9, level, n_rep)
-```
-
-#### Experimental Score
-
-The results are only valid for the DGP 6, as the experimental score assumes a randomized treatment assignment.
-
-```{python}
-#| echo: false
-score = "experimental"
-level = 0.95
-
-df_ate_95 = df[(df['level'] == level) & (df["Score"] == score)][display_columns]
-make_pretty(df_ate_95, level, n_rep)
-```
-
-```{python}
-#| echo: false
-score = "experimental"
-level = 0.9
-
-df_ate_9 = df[(df['level'] == level) & (df["Score"] == score)][display_columns]
-make_pretty(df_ate_9, level, n_rep)
-```
-
-### Time Effects
-
-```{python}
-#| echo: false
-
-# set up data
-df = pd.read_csv("../../results/did/did_multi_time.csv", index_col=None)
-
-assert df["repetition"].nunique() == 1
-n_rep = df["repetition"].unique()[0]
-
-display_columns = ["Learner g", "Learner m", "DGP", "In-sample-norm.", "Bias", "CI Length", "Coverage", "Uniform CI Length", "Uniform Coverage"]
-```
-
-#### Observational Score
-
-```{python}
-#| echo: false
-score = "observational"
-level = 0.95
-
-df_ate_95 = df[(df['level'] == level) & (df["Score"] == score)][display_columns]
-make_pretty(df_ate_95, level, n_rep)
-```
-
-```{python}
-#| echo: false
-score = "observational"
-level = 0.9
-
-df_ate_9 = df[(df['level'] == level) & (df["Score"] == score)][display_columns]
-make_pretty(df_ate_9, level, n_rep)
-```
-
-#### Experimental Score
-
-The results are only valid for the DGP 6, as the experimental score assumes a randomized treatment assignment.
-
-```{python}
-#| echo: false
-score = "experimental"
-level = 0.95
-
-df_ate_95 = df[(df['level'] == level) & (df["Score"] == score)][display_columns]
-make_pretty(df_ate_95, level, n_rep)
-```
-
-```{python}
-#| echo: false
-score = "experimental"
-level = 0.9
-
-df_ate_9 = df[(df['level'] == level) & (df["Score"] == score)][display_columns]
-make_pretty(df_ate_9, level, n_rep)
-```
-
-### Event Study Aggregation
-
-```{python}
-#| echo: false
-
-# set up data
-df = pd.read_csv("../../results/did/did_multi_eventstudy.csv", index_col=None)
-
-assert df["repetition"].nunique() == 1
-n_rep = df["repetition"].unique()[0]
-
-display_columns = ["Learner g", "Learner m", "DGP", "In-sample-norm.", "Bias", "CI Length", "Coverage", "Uniform CI Length", "Uniform Coverage"]
-```
-
-#### Observational Score
-
-```{python}
-#| echo: false
-score = "observational"
-level = 0.95
-
-df_ate_95 = df[(df['level'] == level) & (df["Score"] == score)][display_columns]
-make_pretty(df_ate_95, level, n_rep)
-```
-
-```{python}
-#| echo: false
-score = "observational"
-level = 0.9
-
-df_ate_9 = df[(df['level'] == level) & (df["Score"] == score)][display_columns]
-make_pretty(df_ate_9, level, n_rep)
-```
-
-#### Experimental Score
-
-The results are only valid for the DGP 6, as the experimental score assumes a randomized treatment assignment.
-
-
-```{python}
-#| echo: false
-score = "experimental"
-level = 0.95
-
-df_ate_95 = df[(df['level'] == level) & (df["Score"] == score)][display_columns]
-make_pretty(df_ate_95, level, n_rep)
-```
-
-```{python}
-#| echo: false
-score = "experimental"
-level = 0.9
-
-df_ate_9 = df[(df['level'] == level) & (df["Score"] == score)][display_columns]
-make_pretty(df_ate_9, level, n_rep)
-```
\ No newline at end of file
diff --git a/doc/did/did_pa.qmd b/doc/did/did_pa.qmd
index f3abbd3..94f16ed 100644
--- a/doc/did/did_pa.qmd
+++ b/doc/did/did_pa.qmd
@@ -9,55 +9,17 @@ jupyter: python3
import numpy as np
import pandas as pd
-from itables import init_notebook_mode, show, options
+from itables import init_notebook_mode
+import os
+import sys
-init_notebook_mode(all_interactive=True)
+doc_dir = os.path.abspath(os.path.join(os.getcwd(), ".."))
+if doc_dir not in sys.path:
+ sys.path.append(doc_dir)
+
+from utils.style_tables import generate_and_show_styled_table
-def highlight_range(s, level=0.95, dist=0.05, props=''):
- color_grid = np.where((s >= level-dist) &
- (s <= level+dist), props, '')
- return color_grid
-
-
-def color_coverage(df, level):
- # color coverage column order is important
- styled_df = df.apply(
- highlight_range,
- level=level,
- dist=1.0,
- props='color:black;background-color:red',
- subset=["Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.1,
- props='color:black;background-color:yellow',
- subset=["Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.05,
- props='color:white;background-color:darkgreen',
- subset=["Coverage"])
-
- # set all coverage values to bold
- styled_df = styled_df.set_properties(
- **{'font-weight': 'bold'},
- subset=["Coverage"])
- return styled_df
-
-
-def make_pretty(df, level, n_rep):
- styled_df = df.style.hide(axis="index")
- # Format only float columns
- float_cols = df.select_dtypes(include=['float']).columns
- styled_df = styled_df.format({col: "{:.3f}" for col in float_cols})
-
- # color coverage column order is important
- styled_df = color_coverage(styled_df, level)
- caption = f"Coverage for {level*100}%-Confidence Interval over {n_rep} Repetitions"
-
- return show(styled_df, caption=caption, allow_html=True)
+init_notebook_mode(all_interactive=True)
```
## ATTE Coverage
@@ -87,46 +49,61 @@ n_rep = df["repetition"].unique()[0]
display_columns = ["Learner g", "Learner m", "DGP", "In-sample-norm.", "Bias", "CI Length", "Coverage"]
```
-### Observational Score
+### Observational Score
```{python}
#| echo: false
-score = "observational"
-level = 0.95
-df_ate_95 = df[(df['level'] == level) & (df["Score"] == score)][display_columns]
-make_pretty(df_ate_95, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df,
+ filters={"level": 0.95, "Score": "observational"},
+ display_cols=display_columns,
+ n_rep=n_rep,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
```
```{python}
#| echo: false
-score = "observational"
-level = 0.9
-df_ate_9 = df[(df['level'] == level) & (df["Score"] == score)][display_columns]
-make_pretty(df_ate_9, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df,
+ filters={"level": 0.9, "Score": "observational"},
+ display_cols=display_columns,
+ n_rep=n_rep,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
```
-### Experimental Score
+### Experimental Score
Remark that the only two valid DGPs are DGP $5$ and DGP $6$. All other DGPs are invalid due to non-experimental treatment assignment.
```{python}
#| echo: false
-score = "experimental"
-level = 0.95
-df_ate_95 = df[(df['level'] == level) & (df["Score"] == score)][display_columns]
-make_pretty(df_ate_95, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df,
+ filters={"level": 0.95, "Score": "experimental"},
+ display_cols=display_columns,
+ n_rep=n_rep,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
```
```{python}
#| echo: false
-score = "experimental"
-level = 0.9
-df_ate_9 = df[(df['level'] == level) & (df["Score"] == score)][display_columns]
-make_pretty(df_ate_9, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df,
+ filters={"level": 0.9, "Score": "experimental"},
+ display_cols=display_columns,
+ n_rep=n_rep,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
```
-
diff --git a/doc/did/did_pa_multi.qmd b/doc/did/did_pa_multi.qmd
new file mode 100644
index 0000000..b004299
--- /dev/null
+++ b/doc/did/did_pa_multi.qmd
@@ -0,0 +1,322 @@
+---
+title: "DiD for Panel Data over Multiple Periods"
+
+jupyter: python3
+---
+
+```{python}
+#| echo: false
+
+import numpy as np
+import pandas as pd
+from itables import init_notebook_mode
+import os
+import sys
+
+doc_dir = os.path.abspath(os.path.join(os.getcwd(), ".."))
+if doc_dir not in sys.path:
+ sys.path.append(doc_dir)
+
+from utils.style_tables import generate_and_show_styled_table
+
+init_notebook_mode(all_interactive=True)
+```
+
+## ATTE Coverage
+
+The simulations are based on the [make_did_CS2021](https://docs.doubleml.org/dev/api/generated/doubleml.did.datasets.make_did_CS2021.html)-DGP with $2000$ observations. Both learners are set to either boosting or a linear (logistic) model. Due to time constraints, we only consider the following DGPs:
+
+ - Type 1: Linear outcome model and treatment assignment
+ - Type 4: Nonlinear outcome model and treatment assignment
+ - Type 6: Randomized treatment assignment and nonlinear outcome model
+
+The non-uniform results (coverage, CI length and bias) refer to averaged values over all $ATTs$ (point-wise confidence intervals).
+
+::: {.callout-note title="Metadata" collapse="true"}
+
+```{python}
+#| echo: false
+metadata_file = '../../results/did/did_pa_multi_metadata.csv'
+metadata_df = pd.read_csv(metadata_file)
+print(metadata_df.T.to_string(header=False))
+```
+
+:::
+
+```{python}
+#| echo: false
+
+# set up data
+df = pd.read_csv("../../results/did/did_pa_multi_detailed.csv", index_col=None)
+
+assert df["repetition"].nunique() == 1
+n_rep = df["repetition"].unique()[0]
+
+display_columns = ["Learner g", "Learner m", "DGP", "In-sample-norm.", "Bias", "CI Length", "Coverage", "Uniform CI Length", "Uniform Coverage"]
+```
+
+### Observational Score
+
+```{python}
+#| echo: false
+generate_and_show_styled_table(
+ main_df=df,
+ filters={"level": 0.95, "Score": "observational"},
+ display_cols=display_columns,
+ n_rep=n_rep,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
+
+```{python}
+#| echo: false
+generate_and_show_styled_table(
+ main_df=df,
+ filters={"level": 0.9, "Score": "observational"},
+ display_cols=display_columns,
+ n_rep=n_rep,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
+
+
+### Experimental Score
+
+The results are only valid for DGP 6, as the experimental score assumes a randomized treatment assignment.
+
+```{python}
+#| echo: false
+generate_and_show_styled_table(
+ main_df=df,
+ filters={"level": 0.95, "Score": "experimental"},
+ display_cols=display_columns,
+ n_rep=n_rep,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
+
+```{python}
+#| echo: false
+generate_and_show_styled_table(
+ main_df=df,
+ filters={"level": 0.9, "Score": "experimental"},
+ display_cols=display_columns,
+ n_rep=n_rep,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
+
+## Aggregated Effects
+
+These simulations test different types of aggregation, as described in the [DiD User Guide](https://docs.doubleml.org/dev/guide/models.html#difference-in-differences-models-did).
+
+The non-uniform results (coverage, CI length and bias) refer to averaged values over all $ATTs$ (point-wise confidence intervals).
+
+### Group Effects
+
+```{python}
+#| echo: false
+
+# set up data
+df_group = pd.read_csv("../../results/did/did_pa_multi_group.csv", index_col=None)
+
+assert df_group["repetition"].nunique() == 1
+n_rep_group = df_group["repetition"].unique()[0]
+
+display_columns = ["Learner g", "Learner m", "DGP", "In-sample-norm.", "Bias", "CI Length", "Coverage", "Uniform CI Length", "Uniform Coverage"]
+```
+
+#### Observational Score
+
+```{python}
+#| echo: false
+generate_and_show_styled_table(
+ main_df=df_group,
+ filters={"level": 0.95, "Score": "observational"},
+ display_cols=display_columns,
+ n_rep=n_rep_group,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
+
+```{python}
+#| echo: false
+generate_and_show_styled_table(
+ main_df=df_group,
+ filters={"level": 0.9, "Score": "observational"},
+ display_cols=display_columns,
+ n_rep=n_rep_group,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
+
+#### Experimental Score
+
+The results are only valid for DGP 6, as the experimental score assumes a randomized treatment assignment.
+
+```{python}
+#| echo: false
+generate_and_show_styled_table(
+ main_df=df_group,
+ filters={"level": 0.95, "Score": "experimental"},
+ display_cols=display_columns,
+ n_rep=n_rep_group,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
+
+```{python}
+#| echo: false
+generate_and_show_styled_table(
+ main_df=df_group,
+ filters={"level": 0.9, "Score": "experimental"},
+ display_cols=display_columns,
+ n_rep=n_rep_group,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
+
+### Time Effects
+
+```{python}
+#| echo: false
+
+# set up data
+df_time = pd.read_csv("../../results/did/did_pa_multi_time.csv", index_col=None)
+
+assert df_time["repetition"].nunique() == 1
+n_rep_time = df_time["repetition"].unique()[0]
+
+display_columns = ["Learner g", "Learner m", "DGP", "In-sample-norm.", "Bias", "CI Length", "Coverage", "Uniform CI Length", "Uniform Coverage"]
+```
+
+#### Observational Score
+
+```{python}
+#| echo: false
+generate_and_show_styled_table(
+ main_df=df_time,
+ filters={"level": 0.95, "Score": "observational"},
+ display_cols=display_columns,
+ n_rep=n_rep_time,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
+
+```{python}
+#| echo: false
+generate_and_show_styled_table(
+ main_df=df_time,
+ filters={"level": 0.9, "Score": "observational"},
+ display_cols=display_columns,
+ n_rep=n_rep_time,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
+
+#### Experimental Score
+
+The results are only valid for DGP 6, as the experimental score assumes a randomized treatment assignment.
+
+```{python}
+#| echo: false
+generate_and_show_styled_table(
+ main_df=df_time,
+ filters={"level": 0.95, "Score": "experimental"},
+ display_cols=display_columns,
+ n_rep=n_rep_time,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
+
+```{python}
+#| echo: false
+generate_and_show_styled_table(
+ main_df=df_time,
+ filters={"level": 0.9, "Score": "experimental"},
+ display_cols=display_columns,
+ n_rep=n_rep_time,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
+
+### Event Study Aggregation
+
+```{python}
+#| echo: false
+
+# set up data
+df_es = pd.read_csv("../../results/did/did_pa_multi_eventstudy.csv", index_col=None)
+
+assert df_es["repetition"].nunique() == 1
+n_rep_es = df_es["repetition"].unique()[0]
+
+display_columns = ["Learner g", "Learner m", "DGP", "In-sample-norm.", "Bias", "CI Length", "Coverage", "Uniform CI Length", "Uniform Coverage"]
+```
+
+#### Observational Score
+
+```{python}
+#| echo: false
+generate_and_show_styled_table(
+ main_df=df_es,
+ filters={"level": 0.95, "Score": "observational"},
+ display_cols=display_columns,
+ n_rep=n_rep_es,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
+
+```{python}
+#| echo: false
+generate_and_show_styled_table(
+ main_df=df_es,
+ filters={"level": 0.9, "Score": "observational"},
+ display_cols=display_columns,
+ n_rep=n_rep_es,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
+
+#### Experimental Score
+
+The results are only valid for DGP 6, as the experimental score assumes a randomized treatment assignment.
+
+
+```{python}
+#| echo: false
+generate_and_show_styled_table(
+ main_df=df_es,
+ filters={"level": 0.95, "Score": "experimental"},
+ display_cols=display_columns,
+ n_rep=n_rep_es,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
+
+```{python}
+#| echo: false
+generate_and_show_styled_table(
+ main_df=df_es,
+ filters={"level": 0.9, "Score": "experimental"},
+ display_cols=display_columns,
+ n_rep=n_rep_es,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
diff --git a/doc/index.qmd b/doc/index.qmd
index 58f6121..8196e7f 100644
--- a/doc/index.qmd
+++ b/doc/index.qmd
@@ -11,7 +11,7 @@ You can find the code for the simulations in the [GitHub repository](https://git
## Coverage Simulations
-Generally, the [DoubleML package](https://docs.doubleml.org/stable/index.html) solves a moment equation
+Generally, the [DoubleML package](https://docs.doubleml.org/stable/index.html) solves a moment equation
$$
\mathbb{E}[\psi(W,\theta_0,\eta_0)] = 0
@@ -25,12 +25,29 @@ $$
\text{Coverage} = \frac{1}{n_{\text{sim}}} \sum_{i=1}^{n_{\text{sim}}} \mathbb{1}(\hat{\theta}_{\text{lower},i} \leq \theta_0 \leq \hat{\theta}_{\text{upper},i})
$$
-for a nominal coverage level is $1-\alpha$.
+for a nominal coverage level of $1-\alpha$.
The corresponding coverage results are highlighted according to the following color scheme:
-* Green if the deviation to the nominal level is below $5\%$
-* Yellow if the deviation to the nominal level is above $5\%$ and below $10\%$
-* Red if the deviation to the nominal level is above $10\%$
+```{python}
+#| echo: false
+#| output: asis
+from utils.styling import get_coverage_tier_html_span
+
+# Generate color legend using centralized configuration
+good_span = get_coverage_tier_html_span("good")
+medium_span = get_coverage_tier_html_span("medium")
+poor_span = get_coverage_tier_html_span("poor")
+
+from IPython.display import Markdown, display
+
+markdown_output = f"""
+* {good_span} if the deviation from the nominal level is below 5%
+* {medium_span} if the deviation from the nominal level is above 5% and below 10%
+* {poor_span} if the deviation from the nominal level is above 10%
+"""
+
+display(Markdown(markdown_output))
+```
For simulations with multiple parameters of interest, usually pointwise and uniform coverage is assessed.
@@ -249,5 +266,3 @@ fig.show()
:::
:::
-
-
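The rewritten legend in `doc/index.qmd` pulls its colored spans from `utils/styling.py` (`get_coverage_tier_html_span`), which this diff also does not show. A plausible sketch, assuming the tier names and colors mirror the dark green/yellow/red highlighting used in the coverage tables:

```python
# Hypothetical sketch of utils/styling.py; the real module is not shown in
# this diff, and the tier names/colors are assumptions based on the table
# styling used elsewhere in these docs.
COVERAGE_TIERS = {
    "good": {"label": "Green", "color": "white", "background": "darkgreen"},
    "medium": {"label": "Yellow", "color": "black", "background": "yellow"},
    "poor": {"label": "Red", "color": "black", "background": "red"},
}


def get_coverage_tier_html_span(tier: str) -> str:
    # Render a colored inline label, e.g. for the legend in index.qmd.
    style = COVERAGE_TIERS[tier]
    return (
        f'<span style="color:{style["color"]};'
        f'background-color:{style["background"]};'
        f'font-weight:bold;">{style["label"]}</span>'
    )
```

Keeping the tier definitions in one place means the legend and the table highlighting cannot drift apart when thresholds or colors change.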
diff --git a/doc/irm/apo.qmd b/doc/irm/apo.qmd
index 473a443..376f083 100644
--- a/doc/irm/apo.qmd
+++ b/doc/irm/apo.qmd
@@ -9,55 +9,17 @@ jupyter: python3
import numpy as np
import pandas as pd
-from itables import init_notebook_mode, show, options
+from itables import init_notebook_mode
+import os
+import sys
-init_notebook_mode(all_interactive=True)
+doc_dir = os.path.abspath(os.path.join(os.getcwd(), ".."))
+if doc_dir not in sys.path:
+ sys.path.append(doc_dir)
+
+from utils.style_tables import generate_and_show_styled_table
-def highlight_range(s, level=0.95, dist=0.05, props=''):
- color_grid = np.where((s >= level-dist) &
- (s <= level+dist), props, '')
- return color_grid
-
-
-def color_coverage(df, level):
- # color coverage column order is important
- styled_df = df.apply(
- highlight_range,
- level=level,
- dist=1.0,
- props='color:black;background-color:red',
- subset=["Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.1,
- props='color:black;background-color:yellow',
- subset=["Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.05,
- props='color:white;background-color:darkgreen',
- subset=["Coverage"])
-
- # set all coverage values to bold
- styled_df = styled_df.set_properties(
- **{'font-weight': 'bold'},
- subset=["Coverage"])
- return styled_df
-
-
-def make_pretty(df, level, n_rep):
- styled_df = df.style.hide(axis="index")
- # Format only float columns
- float_cols = df.select_dtypes(include=['float']).columns
- styled_df = styled_df.format({col: "{:.3f}" for col in float_cols})
-
- # color coverage column order is important
- styled_df = color_coverage(styled_df, level)
- caption = f"Coverage for {level*100}%-Confidence Interval over {n_rep} Repetitions"
-
- return show(styled_df, caption=caption, allow_html=True)
+init_notebook_mode(all_interactive=True)
```
## APO Pointwise Coverage
@@ -68,7 +30,7 @@ The simulations are based on the the [make_irm_data_discrete_treatments](https:
```{python}
#| echo: false
-metadata_file = '../../results/irm/irm_apo_coverage_metadata.csv'
+metadata_file = '../../results/irm/apo_metadata.csv'
metadata_df = pd.read_csv(metadata_file)
print(metadata_df.T.to_string(header=False))
```
@@ -78,31 +40,41 @@ print(metadata_df.T.to_string(header=False))
```{python}
#| echo: false
-# set up data and rename columns
-df = pd.read_csv("../../results/irm/irm_apo_coverage_apo.csv", index_col=None)
+# set up data
+df_apo = pd.read_csv("../../results/irm/apo_coverage.csv", index_col=None)
-assert df["repetition"].nunique() == 1
-n_rep = df["repetition"].unique()[0]
+assert df_apo["repetition"].nunique() == 1
+n_rep_apo = df_apo["repetition"].unique()[0]
-display_columns = ["Learner g", "Learner m", "Treatment Level", "Bias", "CI Length", "Coverage"]
+display_columns_apo = ["Learner g", "Learner m", "Treatment Level", "Bias", "CI Length", "Coverage"]
```
```{python}
#| echo: false
-level = 0.95
-df_ate_95 = df[df['level'] == level][display_columns]
-make_pretty(df_ate_95, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_apo,
+ filters={"level": 0.95},
+ display_cols=display_columns_apo,
+ n_rep=n_rep_apo,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
```
```{python}
#| echo: false
-level = 0.9
-df_ate_9 = df[df['level'] == level][display_columns]
-make_pretty(df_ate_9, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_apo,
+ filters={"level": 0.9},
+ display_cols=display_columns_apo,
+ n_rep=n_rep_apo,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
```
@@ -110,13 +82,13 @@ make_pretty(df_ate_9, level, n_rep)
The simulations are based on the [make_irm_data_discrete_treatments](https://docs.doubleml.org/stable/api/api.html#datasets-module)-DGP with $500$ observations. Due to the linearity of the DGP, Lasso and Logit Regression are nearly optimal choices for the nuisance estimation.
-The non-uniform results (coverage, ci length and bias) refer to averaged values over all quantiles (point-wise confidende intervals).
+The non-uniform results (coverage, CI length and bias) refer to averaged values over all quantiles (point-wise confidence intervals).
::: {.callout-note title="Metadata" collapse="true"}
```{python}
#| echo: false
-metadata_file = '../../results/irm/irm_apo_coverage_metadata.csv'
+metadata_file = '../../results/irm/apos_metadata.csv'
metadata_df = pd.read_csv(metadata_file)
print(metadata_df.T.to_string(header=False))
```
@@ -126,93 +98,53 @@ print(metadata_df.T.to_string(header=False))
```{python}
#| echo: false
-def highlight_range(s, level=0.95, dist=0.05, props=''):
- color_grid = np.where((s >= level-dist) &
- (s <= level+dist), props, '')
- return color_grid
-
-
-def color_coverage(df, level):
- # color coverage column order is important
- styled_df = df.apply(
- highlight_range,
- level=level,
- dist=1.0,
- props='color:black;background-color:red',
- subset=["Coverage", "Uniform Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.1,
- props='color:black;background-color:yellow',
- subset=["Coverage", "Uniform Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.05,
- props='color:white;background-color:darkgreen',
- subset=["Coverage", "Uniform Coverage"])
-
- # set all coverage values to bold
- styled_df = styled_df.set_properties(
- **{'font-weight': 'bold'},
- subset=["Coverage", "Uniform Coverage"])
- return styled_df
-
-
-def make_pretty(df, level, n_rep):
- styled_df = df.style.hide(axis="index")
- # Format only float columns
- float_cols = df.select_dtypes(include=['float']).columns
- styled_df = styled_df.format({col: "{:.3f}" for col in float_cols})
-
- # color coverage column order is important
- styled_df = color_coverage(styled_df, level)
- caption = f"Coverage for {level*100}%-Confidence Interval over {n_rep} Repetitions"
-
- return show(styled_df, caption=caption, allow_html=True)
-```
+# set up data
+df_apos = pd.read_csv("../../results/irm/apos_coverage.csv", index_col=None)
-```{python}
-#| echo: false
-
-# set up data and rename columns
-df = pd.read_csv("../../results/irm/irm_apo_coverage_apos.csv", index_col=None)
+assert df_apos["repetition"].nunique() == 1
+n_rep_apos = df_apos["repetition"].unique()[0]
-assert df["repetition"].nunique() == 1
-n_rep = df["repetition"].unique()[0]
-
-display_columns = ["Learner g", "Learner m", "Bias", "CI Length", "Coverage", "Uniform CI Length", "Uniform Coverage"]
+display_columns_apos = ["Learner g", "Learner m", "Bias", "CI Length", "Coverage", "Uniform CI Length", "Uniform Coverage"]
```
```{python}
#| echo: false
-level = 0.95
-df_ate_95 = df[df['level'] == level][display_columns]
-make_pretty(df_ate_95, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_apos,
+ filters={"level": 0.95},
+ display_cols=display_columns_apos,
+ n_rep=n_rep_apos,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
```
```{python}
#| echo: false
-level = 0.9
-df_ate_9 = df[df['level'] == level][display_columns]
-make_pretty(df_ate_9, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_apos,
+ filters={"level": 0.9},
+ display_cols=display_columns_apos,
+ n_rep=n_rep_apos,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
```
## Causal Contrast Coverage
The simulations are based on the [make_irm_data_discrete_treatments](https://docs.doubleml.org/stable/api/api.html#datasets-module)-DGP with $500$ observations. Due to the linearity of the DGP, Lasso and Logit Regression are nearly optimal choices for the nuisance estimation.
-The non-uniform results (coverage, ci length and bias) refer to averaged values over all quantiles (point-wise confidende intervals).
+The non-uniform results (coverage, CI length and bias) refer to averaged values over all causal contrasts (point-wise confidence intervals).
::: {.callout-note title="Metadata" collapse="true"}
```{python}
#| echo: false
-metadata_file = '../../results/irm/irm_apo_coverage_metadata.csv'
+metadata_file = '../../results/irm/apos_metadata.csv'
metadata_df = pd.read_csv(metadata_file)
print(metadata_df.T.to_string(header=False))
```
@@ -222,28 +154,38 @@ print(metadata_df.T.to_string(header=False))
```{python}
#| echo: false
-# set up data and rename columns
-df = pd.read_csv("../../results/irm/irm_apo_coverage_apos_contrast.csv", index_col=None)
+# set up data
+df_contrast = pd.read_csv("../../results/irm/apos_causal_contrast.csv", index_col=None)
-assert df["repetition"].nunique() == 1
-n_rep = df["repetition"].unique()[0]
+assert df_contrast["repetition"].nunique() == 1
+n_rep_contrast = df_contrast["repetition"].unique()[0]
-display_columns = ["Learner g", "Learner m", "Bias", "CI Length", "Coverage", "Uniform CI Length", "Uniform Coverage"]
+display_columns_contrast = ["Learner g", "Learner m", "Bias", "CI Length", "Coverage", "Uniform CI Length", "Uniform Coverage"]
```
```{python}
#| echo: false
-level = 0.95
-df_ate_95 = df[df['level'] == level][display_columns]
-make_pretty(df_ate_95, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_contrast,
+ filters={"level": 0.95},
+ display_cols=display_columns_contrast,
+ n_rep=n_rep_contrast,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
```
```{python}
#| echo: false
-level = 0.9
-df_ate_9 = df[df['level'] == level][display_columns]
-make_pretty(df_ate_9, level, n_rep)
-```
\ No newline at end of file
+generate_and_show_styled_table(
+ main_df=df_contrast,
+ filters={"level": 0.9},
+ display_cols=display_columns_contrast,
+ n_rep=n_rep_contrast,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
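
Across the `.qmd` diffs in this changeset, the inline `highlight_range`/`color_coverage`/`make_pretty` trio is removed (as in `apo.qmd` above and each file below) in favor of a single shared helper imported from `utils.style_tables`. That module is not part of this diff; the following is a minimal sketch of what `generate_and_show_styled_table` plausibly looks like, reconstructed from the removed helpers and the new call sites. The function name matches the imports, but the defaults and internals are assumptions.

```python
# Hypothetical reconstruction of doc/utils/style_tables.py (not shown in this
# diff). It mirrors the per-file helpers removed in the hunks: filter rows,
# select and rename columns, format floats, band-color the coverage columns,
# then render the table with itables.
import numpy as np
import pandas as pd
from itables import show


def _highlight_range(s, level=0.95, dist=0.05, props=""):
    # Mark cells whose value lies within `dist` of the nominal level.
    return np.where((s >= level - dist) & (s <= level + dist), props, "")


def generate_and_show_styled_table(
    main_df, filters, display_cols, n_rep,
    level_col="level", rename_map=None, coverage_highlight_cols=None,
):
    level = filters[level_col]
    mask = pd.Series(True, index=main_df.index)
    for col, val in filters.items():
        mask &= main_df[col] == val
    df = main_df.loc[mask, display_cols].copy()
    if rename_map:
        df = df.rename(columns=rename_map)

    styled = df.style.hide(axis="index")
    float_cols = df.select_dtypes(include=["float"]).columns
    styled = styled.format({col: "{:.3f}" for col in float_cols})

    if coverage_highlight_cols:
        # Band order matters: red everywhere first, then yellow within 0.1 of
        # the nominal level, then dark green within 0.05, each overriding the
        # previous band.
        bands = [
            (1.0, "color:black;background-color:red"),
            (0.1, "color:black;background-color:yellow"),
            (0.05, "color:white;background-color:darkgreen"),
        ]
        for dist, props in bands:
            styled = styled.apply(
                _highlight_range, level=level, dist=dist, props=props,
                subset=coverage_highlight_cols,
            )
        styled = styled.set_properties(
            **{"font-weight": "bold"}, subset=coverage_highlight_cols,
        )

    caption = f"Coverage for {level*100}%-Confidence Interval over {n_rep} Repetitions"
    return show(styled, caption=caption, allow_html=True)
```

Consolidating the styling into one module removes roughly forty duplicated lines per document and keeps the red/yellow/green coverage bands consistent everywhere.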
diff --git a/doc/irm/iivm.qmd b/doc/irm/iivm.qmd
index c74277b..7dd53c2 100644
--- a/doc/irm/iivm.qmd
+++ b/doc/irm/iivm.qmd
@@ -9,55 +9,17 @@ jupyter: python3
import numpy as np
import pandas as pd
-from itables import init_notebook_mode, show, options
+from itables import init_notebook_mode
+import os
+import sys
-init_notebook_mode(all_interactive=True)
+doc_dir = os.path.abspath(os.path.join(os.getcwd(), ".."))
+if doc_dir not in sys.path:
+ sys.path.append(doc_dir)
+
+from utils.style_tables import generate_and_show_styled_table
-def highlight_range(s, level=0.95, dist=0.05, props=''):
- color_grid = np.where((s >= level-dist) &
- (s <= level+dist), props, '')
- return color_grid
-
-
-def color_coverage(df, level):
- # color coverage column order is important
- styled_df = df.apply(
- highlight_range,
- level=level,
- dist=1.0,
- props='color:black;background-color:red',
- subset=["Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.1,
- props='color:black;background-color:yellow',
- subset=["Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.05,
- props='color:white;background-color:darkgreen',
- subset=["Coverage"])
-
- # set all coverage values to bold
- styled_df = styled_df.set_properties(
- **{'font-weight': 'bold'},
- subset=["Coverage"])
- return styled_df
-
-
-def make_pretty(df, level, n_rep):
- styled_df = df.style.hide(axis="index")
- # Format only float columns
- float_cols = df.select_dtypes(include=['float']).columns
- styled_df = styled_df.format({col: "{:.3f}" for col in float_cols})
-
- # color coverage column order is important
- styled_df = color_coverage(styled_df, level)
- caption = f"Coverage for {level*100}%-Confidence Interval over {n_rep} Repetitions"
-
- return show(styled_df, caption=caption, allow_html=True)
+init_notebook_mode(all_interactive=True)
```
## LATE Coverage
@@ -68,7 +30,7 @@ The simulations are based on the the [make_iivm_data](https://docs.doubleml.org
```{python}
#| echo: false
-metadata_file = '../../results/irm/iivm_late_coverage_metadata.csv'
+metadata_file = '../../results/irm/iivm_late_metadata.csv'
metadata_df = pd.read_csv(metadata_file)
print(metadata_df.T.to_string(header=False))
```
@@ -84,23 +46,33 @@ df = pd.read_csv("../../results/irm/iivm_late_coverage.csv", index_col=None)
assert df["repetition"].nunique() == 1
n_rep = df["repetition"].unique()[0]
-display_columns = ["Learner g", "Learner m", "Bias", "CI Length", "Coverage"]
+display_columns = ["Learner g", "Learner m", "Learner r", "Bias", "CI Length", "Coverage"]
```
```{python}
#| echo: false
-level = 0.95
-df_ate_95 = df[df['level'] == level][display_columns]
-make_pretty(df_ate_95, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df,
+ filters={"level": 0.95},
+ display_cols=display_columns,
+ n_rep=n_rep,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
```
```{python}
#| echo: false
-level = 0.9
-df_ate_9 = df[df['level'] == level][display_columns]
-make_pretty(df_ate_9, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df,
+ filters={"level": 0.9},
+ display_cols=display_columns,
+ n_rep=n_rep,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
```
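
For reference, the new call pattern can be exercised on a synthetic frame without the results CSVs. This assumes the real `utils/style_tables.py` (or the sketch above) is importable, e.g. after the `sys.path` bootstrap the diffs add; all values below are made up.

```python
# Synthetic smoke test for the new table helper (illustrative values only).
import pandas as pd
from utils.style_tables import generate_and_show_styled_table  # assumed module

df_demo = pd.DataFrame({
    "Learner g": ["Lasso", "LGBM"],
    "Learner m": ["Logit", "LGBM"],
    "Bias": [0.012, 0.034],
    "CI Length": [0.210, 0.305],
    "Coverage": [0.948, 0.901],
    "level": [0.95, 0.95],
})

generate_and_show_styled_table(
    main_df=df_demo,
    filters={"level": 0.95},
    display_cols=["Learner g", "Learner m", "Bias", "CI Length", "Coverage"],
    n_rep=1000,
    level_col="level",
    coverage_highlight_cols=["Coverage"],
)
```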
diff --git a/doc/irm/irm.qmd b/doc/irm/irm.qmd
index 1c006ad..a25087c 100644
--- a/doc/irm/irm.qmd
+++ b/doc/irm/irm.qmd
@@ -9,55 +9,17 @@ jupyter: python3
import numpy as np
import pandas as pd
-from itables import init_notebook_mode, show, options
+from itables import init_notebook_mode
+import os
+import sys
-init_notebook_mode(all_interactive=True)
+doc_dir = os.path.abspath(os.path.join(os.getcwd(), ".."))
+if doc_dir not in sys.path:
+ sys.path.append(doc_dir)
+
+from utils.style_tables import generate_and_show_styled_table
-def highlight_range(s, level=0.95, dist=0.05, props=''):
- color_grid = np.where((s >= level-dist) &
- (s <= level+dist), props, '')
- return color_grid
-
-
-def color_coverage(df, level):
- # color coverage column order is important
- styled_df = df.apply(
- highlight_range,
- level=level,
- dist=1.0,
- props='color:black;background-color:red',
- subset=["Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.1,
- props='color:black;background-color:yellow',
- subset=["Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.05,
- props='color:white;background-color:darkgreen',
- subset=["Coverage"])
-
- # set all coverage values to bold
- styled_df = styled_df.set_properties(
- **{'font-weight': 'bold'},
- subset=["Coverage"])
- return styled_df
-
-
-def make_pretty(df, level, n_rep):
- styled_df = df.style.hide(axis="index")
- # Format only float columns
- float_cols = df.select_dtypes(include=['float']).columns
- styled_df = styled_df.format({col: "{:.3f}" for col in float_cols})
-
- # color coverage column order is important
- styled_df = color_coverage(styled_df, level)
- caption = f"Coverage for {level*100}%-Confidence Interval over {n_rep} Repetitions"
-
- return show(styled_df, caption=caption, allow_html=True)
+init_notebook_mode(all_interactive=True)
```
## ATE Coverage
@@ -68,7 +30,7 @@ The simulations are based on the the [make_irm_data](https://docs.doubleml.org/
```{python}
#| echo: false
-metadata_file = '../../results/irm/irm_ate_coverage_metadata.csv'
+metadata_file = '../../results/irm/irm_ate_metadata.csv'
metadata_df = pd.read_csv(metadata_file)
print(metadata_df.T.to_string(header=False))
```
@@ -78,31 +40,41 @@ print(metadata_df.T.to_string(header=False))
```{python}
#| echo: false
-# set up data and rename columns
-df = pd.read_csv("../../results/irm/irm_ate_coverage.csv", index_col=None)
+# set up data
+df_ate_cov = pd.read_csv("../../results/irm/irm_ate_coverage.csv", index_col=None)
-assert df["repetition"].nunique() == 1
-n_rep = df["repetition"].unique()[0]
+assert df_ate_cov["repetition"].nunique() == 1
+n_rep_ate_cov = df_ate_cov["repetition"].unique()[0]
-display_columns = ["Learner g", "Learner m", "Bias", "CI Length", "Coverage"]
+display_columns_ate_cov = ["Learner g", "Learner m", "Bias", "CI Length", "Coverage"]
```
```{python}
#| echo: false
-level = 0.95
-df_ate_95 = df[df['level'] == level][display_columns]
-make_pretty(df_ate_95, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_ate_cov,
+ filters={"level": 0.95},
+ display_cols=display_columns_ate_cov,
+ n_rep=n_rep_ate_cov,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
```
```{python}
#| echo: false
-level = 0.9
-df_ate_9 = df[df['level'] == level][display_columns]
-make_pretty(df_ate_9, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_ate_cov,
+ filters={"level": 0.9},
+ display_cols=display_columns_ate_cov,
+ n_rep=n_rep_ate_cov,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
```
@@ -114,7 +86,7 @@ As for the ATE, the simulations are based on the the [make_irm_data](https://do
```{python}
#| echo: false
-metadata_file = '../../results/irm/irm_atte_coverage_metadata.csv'
+metadata_file = '../../results/irm/irm_atte_metadata.csv'
metadata_df = pd.read_csv(metadata_file)
print(metadata_df.T.to_string(header=False))
```
@@ -124,96 +96,50 @@ print(metadata_df.T.to_string(header=False))
```{python}
#| echo: false
-# set up data and rename columns
-df = pd.read_csv("../../results/irm/irm_atte_coverage.csv", index_col=None)
+# set up data
+df_atte_cov = pd.read_csv("../../results/irm/irm_atte_coverage.csv", index_col=None)
-assert df["repetition"].nunique() == 1
-n_rep = df["repetition"].unique()[0]
+assert df_atte_cov["repetition"].nunique() == 1
+n_rep_atte_cov = df_atte_cov["repetition"].unique()[0]
-display_columns = ["Learner g", "Learner m", "Bias", "CI Length", "Coverage"]
+display_columns_atte_cov = ["Learner g", "Learner m", "Bias", "CI Length", "Coverage"]
```
```{python}
#| echo: false
-level = 0.95
-df_atte_95 = df[df['level'] == level][display_columns]
-make_pretty(df_atte_95, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_atte_cov,
+ filters={"level": 0.95},
+ display_cols=display_columns_atte_cov,
+ n_rep=n_rep_atte_cov,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
```
```{python}
#| echo: false
-level = 0.9
-df_atte_9 = df[df['level'] == level][display_columns]
-make_pretty(df_atte_9, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_atte_cov,
+ filters={"level": 0.9},
+ display_cols=display_columns_atte_cov,
+ n_rep=n_rep_atte_cov,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
```
## Sensitivity
-The simulations are based on the the ADD-DGP with $10,000$ observations. As the DGP is nonlinear, we will only use corresponding learners. Since the DGP includes an unobserved confounder, we would expect a bias in the ATE estimates, leading to low coverage of the true parameter.
+The simulations are based on the [make_confounded_irm_data](https://docs.doubleml.org/stable/api/generated/doubleml.datasets.make_confounded_irm_data.html#doubleml.datasets.make_confounded_irm_data)-DGP with $5,000$ observations. Since the DGP includes an unobserved confounder, we would expect a bias in the ATE estimates, leading to low coverage of the true parameter.
The confounding is set such that both sensitivity parameters are approximately $cf_y=cf_d=0.1$, so the robustness value $RV$ should be approximately $10\%$.
Further, the corresponding confidence intervals are one-sided (since the direction of the bias is unknown), such that only one side should approximate the corresponding coverage level (here, only the lower coverage is relevant since the bias is positive). Remark that the value of $\rho$ has to be correctly specified for the coverage level, such that the coverage will generally be (significantly) larger than the nominal level under the conservative choice of $|\rho|=1$.
-```{python}
-#| echo: false
-
-import numpy as np
-import pandas as pd
-from itables import init_notebook_mode, show, options
-
-init_notebook_mode(all_interactive=True)
-
-def highlight_range(s, level=0.95, dist=0.05, props=''):
- color_grid = np.where((s >= level-dist) &
- (s <= level+dist), props, '')
- return color_grid
-
-
-def color_coverage(df, level):
- # color coverage column order is important
- styled_df = df.apply(
- highlight_range,
- level=level,
- dist=1.0,
- props='color:black;background-color:red',
- subset=["Coverage", "Coverage (Lower)"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.1,
- props='color:black;background-color:yellow',
- subset=["Coverage", "Coverage (Lower)"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.05,
- props='color:white;background-color:darkgreen',
- subset=["Coverage", "Coverage (Lower)"])
-
- # set all coverage values to bold
- styled_df = styled_df.set_properties(
- **{'font-weight': 'bold'},
- subset=["Coverage", "Coverage (Lower)"])
- return styled_df
-
-
-def make_pretty(df, level, n_rep):
- styled_df = df.style.hide(axis="index")
- # Format only float columns
- float_cols = df.select_dtypes(include=['float']).columns
- styled_df = styled_df.format({col: "{:.3f}" for col in float_cols})
-
- # color coverage column order is important
- styled_df = color_coverage(styled_df, level)
- caption = f"Coverage for {level*100}%-Confidence Interval over {n_rep} Repetitions"
-
- return show(styled_df, caption=caption, allow_html=True)
-```
-
### ATE
::: {.callout-note title="Metadata" collapse="true"}
@@ -231,33 +157,41 @@ print(metadata_df.T.to_string(header=False))
#| echo: false
# set up data and rename columns
-df = pd.read_csv("../../results/irm/irm_ate_sensitivity.csv", index_col=None)
+df_ate_sens = pd.read_csv("../../results/irm/irm_ate_sensitivity_coverage.csv", index_col=None)
-assert df["repetition"].nunique() == 1
-n_rep = df["repetition"].unique()[0]
+assert df_ate_sens["repetition"].nunique() == 1
+n_rep_ate_sens = df_ate_sens["repetition"].unique()[0]
-display_columns = [
+display_columns_ate_sens = [
"Learner g", "Learner m", "Bias", "Bias (Lower)", "Bias (Upper)", "Coverage", "Coverage (Lower)", "Coverage (Upper)", "RV", "RVa"]
+rename_map_sens = {"Learner g": "Learner l"}
+coverage_highlight_cols_sens = ["Coverage", "Coverage (Lower)"]
```
```{python}
#| echo: false
-score = "partialling out"
-level = 0.95
-
-df_ate_95 = df[(df['level'] == level)][display_columns]
-df_ate_95.rename(columns={"Learner g": "Learner l"}, inplace=True)
-make_pretty(df_ate_95, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_ate_sens,
+ filters={"level": 0.95},
+ display_cols=display_columns_ate_sens,
+ n_rep=n_rep_ate_sens,
+ level_col="level",
+ rename_map=rename_map_sens,
+ coverage_highlight_cols=coverage_highlight_cols_sens
+)
```
```{python}
#| echo: false
-score = "partialling out"
-level = 0.9
-
-df_ate_9 = df[(df['level'] == level)][display_columns]
-df_ate_9.rename(columns={"Learner g": "Learner l"}, inplace=True)
-make_pretty(df_ate_9, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_ate_sens,
+ filters={"level": 0.9},
+ display_cols=display_columns_ate_sens,
+ n_rep=n_rep_ate_sens,
+ level_col="level",
+ rename_map=rename_map_sens,
+ coverage_highlight_cols=coverage_highlight_cols_sens
+)
```
### ATTE
@@ -276,32 +210,38 @@ print(metadata_df.T.to_string(header=False))
```{python}
#| echo: false
-# set up data and rename columns
-df = pd.read_csv("../../results/irm/irm_atte_sensitivity.csv", index_col=None)
+# set up data
+df_atte_sens = pd.read_csv("../../results/irm/irm_atte_sensitivity_coverage.csv", index_col=None)
-assert df["repetition"].nunique() == 1
-n_rep = df["repetition"].unique()[0]
+assert df_atte_sens["repetition"].nunique() == 1
+n_rep_atte_sens = df_atte_sens["repetition"].unique()[0]
-display_columns = [
+display_columns_atte_sens = [
"Learner g", "Learner m", "Bias", "Bias (Lower)", "Bias (Upper)", "Coverage", "Coverage (Lower)", "Coverage (Upper)", "RV", "RVa"]
```
```{python}
#| echo: false
-score = "partialling out"
-level = 0.95
-
-df_ate_95 = df[(df['level'] == level)][display_columns]
-df_ate_95.rename(columns={"Learner g": "Learner l"}, inplace=True)
-make_pretty(df_ate_95, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_atte_sens,
+ filters={"level": 0.95},
+ display_cols=display_columns_atte_sens,
+ n_rep=n_rep_atte_sens,
+ level_col="level",
+ rename_map=rename_map_sens,
+ coverage_highlight_cols=coverage_highlight_cols_sens
+)
```
```{python}
#| echo: false
-score = "partialling out"
-level = 0.9
-
-df_ate_9 = df[(df['level'] == level)][display_columns]
-df_ate_9.rename(columns={"Learner g": "Learner l"}, inplace=True)
-make_pretty(df_ate_9, level, n_rep)
-```
\ No newline at end of file
+generate_and_show_styled_table(
+ main_df=df_atte_sens,
+ filters={"level": 0.9},
+ display_cols=display_columns_atte_sens,
+ n_rep=n_rep_atte_sens,
+ level_col="level",
+ rename_map=rename_map_sens,
+ coverage_highlight_cols=coverage_highlight_cols_sens
+)
+```
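
The sensitivity sections above evaluate one-sided coverage. As a toy illustration of that check (made-up numbers, not the simulation code): with an upward-biased estimator, the upper one-sided bound covers the truth trivially, so the lower bound is the binding check.

```python
# Toy illustration of one-sided coverage (illustrative numbers only).
import numpy as np

theta_true = 0.5                          # assumed oracle ATE
theta_hat = np.array([0.56, 0.59, 0.61])  # upward-biased point estimates
se = np.array([0.04, 0.05, 0.04])
z = 1.645                                 # one-sided 95% critical value

covered_lower = theta_true >= theta_hat - z * se  # lower bound covers?
covered_upper = theta_true <= theta_hat + z * se  # covers trivially here
print(covered_lower.mean(), covered_upper.mean())
```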
diff --git a/doc/irm/irm_cate.qmd b/doc/irm/irm_cate.qmd
index 7c89ab7..df2d3c6 100644
--- a/doc/irm/irm_cate.qmd
+++ b/doc/irm/irm_cate.qmd
@@ -9,68 +9,30 @@ jupyter: python3
import numpy as np
import pandas as pd
-from itables import init_notebook_mode, show, options
+from itables import init_notebook_mode
+import os
+import sys
-init_notebook_mode(all_interactive=True)
+doc_dir = os.path.abspath(os.path.join(os.getcwd(), ".."))
+if doc_dir not in sys.path:
+ sys.path.append(doc_dir)
+
+from utils.style_tables import generate_and_show_styled_table
-def highlight_range(s, level=0.95, dist=0.05, props=''):
- color_grid = np.where((s >= level-dist) &
- (s <= level+dist), props, '')
- return color_grid
-
-
-def color_coverage(df, level):
- # color coverage column order is important
- styled_df = df.apply(
- highlight_range,
- level=level,
- dist=1.0,
- props='color:black;background-color:red',
- subset=["Coverage", "Uniform Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.1,
- props='color:black;background-color:yellow',
- subset=["Coverage", "Uniform Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.05,
- props='color:white;background-color:darkgreen',
- subset=["Coverage", "Uniform Coverage"])
-
- # set all coverage values to bold
- styled_df = styled_df.set_properties(
- **{'font-weight': 'bold'},
- subset=["Coverage", "Uniform Coverage"])
- return styled_df
-
-
-def make_pretty(df, level, n_rep):
- styled_df = df.style.hide(axis="index")
- # Format only float columns
- float_cols = df.select_dtypes(include=['float']).columns
- styled_df = styled_df.format({col: "{:.3f}" for col in float_cols})
-
- # color coverage column order is important
- styled_df = color_coverage(styled_df, level)
- caption = f"Coverage for {level*100}%-Confidence Interval over {n_rep} Repetitions"
-
- return show(styled_df, caption=caption, allow_html=True)
+init_notebook_mode(all_interactive=True)
```
## CATE Coverage
The simulations are based on the [make_heterogeneous_data](https://docs.doubleml.org/stable/api/generated/doubleml.datasets.make_heterogeneous_data.html)-DGP with $2,000$ observations. The groups are defined based on the first covariate, analogously to the [CATE IRM Example](https://docs.doubleml.org/stable/examples/py_double_ml_cate.html), but the nuisance elements are estimated with [LightGBM](https://lightgbm.readthedocs.io/en/latest/index.html) (due to time constraints).
-The non-uniform results (coverage, ci length and bias) refer to averaged values over all groups (point-wise confidende intervals).
+The non-uniform results (coverage, CI length and bias) refer to averaged values over all groups (point-wise confidence intervals).
::: {.callout-note title="Metadata" collapse="true"}
```{python}
#| echo: false
-metadata_file = '../../results/irm/irm_cate_coverage_metadata.csv'
+metadata_file = '../../results/irm/irm_cate_metadata.csv'
metadata_df = pd.read_csv(metadata_file)
print(metadata_df.T.to_string(header=False))
```
@@ -93,15 +55,25 @@ display_columns = ["Learner g", "Learner m", "Bias", "CI Length", "Coverage", "U
```{python}
#| echo: false
-level = 0.95
-df_ate_95 = df[df['level'] == level][display_columns]
-make_pretty(df_ate_95, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df,
+ filters={"level": 0.95},
+ display_cols=display_columns,
+ n_rep=n_rep,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
```
```{python}
#| echo: false
-level = 0.9
-df_ate_9 = df[df['level'] == level][display_columns]
-make_pretty(df_ate_9, level, n_rep)
-```
\ No newline at end of file
+generate_and_show_styled_table(
+ main_df=df,
+ filters={"level": 0.9},
+ display_cols=display_columns,
+ n_rep=n_rep,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
diff --git a/doc/irm/irm_gate.qmd b/doc/irm/irm_gate.qmd
index bf98cc4..9224fae 100644
--- a/doc/irm/irm_gate.qmd
+++ b/doc/irm/irm_gate.qmd
@@ -9,68 +9,30 @@ jupyter: python3
import numpy as np
import pandas as pd
-from itables import init_notebook_mode, show, options
+from itables import init_notebook_mode
+import os
+import sys
-init_notebook_mode(all_interactive=True)
+doc_dir = os.path.abspath(os.path.join(os.getcwd(), ".."))
+if doc_dir not in sys.path:
+ sys.path.append(doc_dir)
+
+from utils.style_tables import generate_and_show_styled_table
-def highlight_range(s, level=0.95, dist=0.05, props=''):
- color_grid = np.where((s >= level-dist) &
- (s <= level+dist), props, '')
- return color_grid
-
-
-def color_coverage(df, level):
- # color coverage column order is important
- styled_df = df.apply(
- highlight_range,
- level=level,
- dist=1.0,
- props='color:black;background-color:red',
- subset=["Coverage", "Uniform Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.1,
- props='color:black;background-color:yellow',
- subset=["Coverage", "Uniform Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.05,
- props='color:white;background-color:darkgreen',
- subset=["Coverage", "Uniform Coverage"])
-
- # set all coverage values to bold
- styled_df = styled_df.set_properties(
- **{'font-weight': 'bold'},
- subset=["Coverage", "Uniform Coverage"])
- return styled_df
-
-
-def make_pretty(df, level, n_rep):
- styled_df = df.style.hide(axis="index")
- # Format only float columns
- float_cols = df.select_dtypes(include=['float']).columns
- styled_df = styled_df.format({col: "{:.3f}" for col in float_cols})
-
- # color coverage column order is important
- styled_df = color_coverage(styled_df, level)
- caption = f"Coverage for {level*100}%-Confidence Interval over {n_rep} Repetitions"
-
- return show(styled_df, caption=caption, allow_html=True)
+init_notebook_mode(all_interactive=True)
```
## GATE Coverage
The simulations are based on the [make_heterogeneous_data](https://docs.doubleml.org/stable/api/generated/doubleml.datasets.make_heterogeneous_data.html)-DGP with $500$ observations. The groups are defined based on the first covariate, analogously to the [GATE IRM Example](https://docs.doubleml.org/stable/examples/py_double_ml_gate.html), but the nuisance elements are estimated with [LightGBM](https://lightgbm.readthedocs.io/en/latest/index.html) (due to time constraints).
-The non-uniform results (coverage, ci length and bias) refer to averaged values over all groups (point-wise confidende intervals).
+The non-uniform results (coverage, CI length and bias) refer to averaged values over all groups (point-wise confidence intervals).
::: {.callout-note title="Metadata" collapse="true"}
```{python}
#| echo: false
-metadata_file = '../../results/irm/irm_gate_coverage_metadata.csv'
+metadata_file = '../../results/irm/irm_gate_metadata.csv'
metadata_df = pd.read_csv(metadata_file)
print(metadata_df.T.to_string(header=False))
```
@@ -92,15 +54,25 @@ display_columns = ["Learner g", "Learner m", "Bias", "CI Length", "Coverage", "U
```{python}
#| echo: false
-level = 0.95
-df_ate_95 = df[df['level'] == level][display_columns]
-make_pretty(df_ate_95, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df,
+ filters={"level": 0.95},
+ display_cols=display_columns,
+ n_rep=n_rep,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
```
```{python}
#| echo: false
-level = 0.9
-df_ate_9 = df[df['level'] == level][display_columns]
-make_pretty(df_ate_9, level, n_rep)
-```
\ No newline at end of file
+generate_and_show_styled_table(
+ main_df=df,
+ filters={"level": 0.9},
+ display_cols=display_columns,
+ n_rep=n_rep,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
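
Several tables in these documents report both `Coverage` and `Uniform Coverage`. As background (standard definitions, not taken from this diff): for effects $\theta_1,\dots,\theta_K$ (groups, quantiles, or treatment levels), the two notions are

$$
\text{pointwise: } \frac{1}{K}\sum_{k=1}^{K}\mathbb{1}\left\{\theta_k \in \left[\hat\theta_k \pm q_{1-\alpha/2}\,\widehat{\operatorname{se}}_k\right]\right\},
\qquad
\text{uniform: } \mathbb{1}\left\{\theta_k \in \left[\hat\theta_k \pm \hat c_{1-\alpha}\,\widehat{\operatorname{se}}_k\right] \text{ for all } k\right\},
$$

where $\hat c_{1-\alpha}$ is a critical value for $\max_k |\hat\theta_k - \theta_k|/\widehat{\operatorname{se}}_k$, typically obtained via a multiplier bootstrap. This is why the uniform intervals are wider and why the point-wise columns are averages over $k$.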
diff --git a/doc/irm/qte.qmd b/doc/irm/qte.qmd
index 2a5f604..4b60ccc 100644
--- a/doc/irm/qte.qmd
+++ b/doc/irm/qte.qmd
@@ -8,68 +8,30 @@ jupyter: python3
import numpy as np
import pandas as pd
-from itables import init_notebook_mode, show, options
+from itables import init_notebook_mode
+import os
+import sys
-init_notebook_mode(all_interactive=True)
+doc_dir = os.path.abspath(os.path.join(os.getcwd(), ".."))
+if doc_dir not in sys.path:
+ sys.path.append(doc_dir)
+
+from utils.style_tables import generate_and_show_styled_table
-def highlight_range(s, level=0.95, dist=0.05, props=''):
- color_grid = np.where((s >= level-dist) &
- (s <= level+dist), props, '')
- return color_grid
-
-
-def color_coverage(df, level):
- # color coverage column order is important
- styled_df = df.apply(
- highlight_range,
- level=level,
- dist=1.0,
- props='color:black;background-color:red',
- subset=["Coverage", "Uniform Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.1,
- props='color:black;background-color:yellow',
- subset=["Coverage", "Uniform Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.05,
- props='color:white;background-color:darkgreen',
- subset=["Coverage", "Uniform Coverage"])
-
- # set all coverage values to bold
- styled_df = styled_df.set_properties(
- **{'font-weight': 'bold'},
- subset=["Coverage", "Uniform Coverage"])
- return styled_df
-
-
-def make_pretty(df, level, n_rep):
- styled_df = df.style.hide(axis="index")
- # Format only float columns
- float_cols = df.select_dtypes(include=['float']).columns
- styled_df = styled_df.format({col: "{:.3f}" for col in float_cols})
-
- # color coverage column order is important
- styled_df = color_coverage(styled_df, level)
- caption = f"Coverage for {level*100}%-Confidence Interval over {n_rep} Repetitions"
-
- return show(styled_df, caption=caption, allow_html=True)
+init_notebook_mode(all_interactive=True)
```
## QTE
The results are based on a location-scale model as described in the corresponding [Example](https://docs.doubleml.org/stable/examples/py_double_ml_pq.html) with $5,000$ observations.
-The non-uniform results (coverage, ci length and bias) refer to averaged values over all quantiles (point-wise confidende intervals).
+The non-uniform results (coverage, CI length and bias) refer to averaged values over all quantiles (point-wise confidence intervals).
::: {.callout-note title="Metadata" collapse="true"}
```{python}
#| echo: false
-metadata_file = '../../results/irm/pq_coverage_metadata.csv'
+metadata_file = '../../results/irm/pq_metadata.csv'
metadata_df = pd.read_csv(metadata_file)
print(metadata_df.T.to_string(header=False))
```
@@ -79,105 +41,82 @@ print(metadata_df.T.to_string(header=False))
```{python}
#| echo: false
-# set up data and rename columns
-df = pd.read_csv("../../results/irm/pq_coverage_qte.csv", index_col=None)
+# set up data
+df_qte = pd.read_csv("../../results/irm/pq_effect_coverage.csv", index_col=None)
-assert df["repetition"].nunique() == 1
-n_rep = df["repetition"].unique()[0]
+assert df_qte["repetition"].nunique() == 1
+n_rep_qte = df_qte["repetition"].unique()[0]
-display_columns = ["Learner g", "Learner m", "Bias", "CI Length", "Coverage", "Uniform CI Length", "Uniform Coverage"]
+display_columns_qte = ["Learner g", "Learner m", "Bias", "CI Length", "Coverage", "Uniform CI Length", "Uniform Coverage"]
```
```{python}
#| echo: false
-level = 0.95
-df_ate_95 = df[df['level'] == level][display_columns]
-make_pretty(df_ate_95, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_qte,
+ filters={"level": 0.95},
+ display_cols=display_columns_qte,
+ n_rep=n_rep_qte,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
```
```{python}
#| echo: false
-level = 0.9
-df_ate_9 = df[df['level'] == level][display_columns]
-make_pretty(df_ate_9, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_qte,
+ filters={"level": 0.9},
+ display_cols=display_columns_qte,
+ n_rep=n_rep_qte,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
```
## Potential Quantiles
-```{python}
-#| echo: false
-
-def color_coverage(df, level):
- # color coverage column order is important
- styled_df = df.apply(
- highlight_range,
- level=level,
- dist=1.0,
- props='color:black;background-color:red',
- subset=["Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.1,
- props='color:black;background-color:yellow',
- subset=["Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.05,
- props='color:white;background-color:darkgreen',
- subset=["Coverage"])
-
- # set all coverage values to bold
- styled_df = styled_df.set_properties(
- **{'font-weight': 'bold'},
- subset=["Coverage"])
- return styled_df
-
-
-def make_pretty(df, level, n_rep):
- styled_df = df.style.hide(axis="index")
- # Format only float columns
- float_cols = df.select_dtypes(include=['float']).columns
- styled_df = styled_df.format({col: "{:.3f}" for col in float_cols})
-
- # color coverage column order is important
- styled_df = color_coverage(styled_df, level)
- caption = f"Coverage for {level*100}%-Confidence Interval over {n_rep} Repetitions"
-
- return show(styled_df, caption=caption, allow_html=True)
-```
### Y(0) - Quantile
```{python}
#| echo: false
-# set up data and rename columns
-df = pd.read_csv("../../results/irm/pq_coverage_pq0.csv", index_col=None)
+# set up data
+df_pq0 = pd.read_csv("../../results/irm/pq_Y0_coverage.csv", index_col=None)
-assert df["repetition"].nunique() == 1
-n_rep = df["repetition"].unique()[0]
+assert df_pq0["repetition"].nunique() == 1
+n_rep_pq0 = df_pq0["repetition"].unique()[0]
-display_columns = ["Learner g", "Learner m", "Bias", "CI Length", "Coverage"]
+display_columns_pq = ["Learner g", "Learner m", "Bias", "CI Length", "Coverage"]
```
```{python}
#| echo: false
-level = 0.95
-df_ate_95 = df[df['level'] == level][display_columns]
-make_pretty(df_ate_95, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_pq0,
+ filters={"level": 0.95},
+ display_cols=display_columns_pq,
+ n_rep=n_rep_pq0,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
```
```{python}
#| echo: false
-level = 0.9
-df_ate_9 = df[df['level'] == level][display_columns]
-make_pretty(df_ate_9, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_pq0,
+ filters={"level": 0.9},
+ display_cols=display_columns_pq,
+ n_rep=n_rep_pq0,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
```
### Y(1) - Quantile
@@ -186,91 +125,51 @@ make_pretty(df_ate_9, level, n_rep)
#| echo: false
# set up data and rename columns
-df = pd.read_csv("../../results/irm/pq_coverage_pq1.csv", index_col=None)
+df_pq1 = pd.read_csv("../../results/irm/pq_Y1_coverage.csv", index_col=None)
-assert df["repetition"].nunique() == 1
-n_rep = df["repetition"].unique()[0]
+assert df_pq1["repetition"].nunique() == 1
+n_rep_pq1 = df_pq1["repetition"].unique()[0]
-display_columns = ["Learner g", "Learner m", "Bias", "CI Length", "Coverage"]
+# display_columns_pq is the same as for Y(0)
```
```{python}
#| echo: false
-level = 0.95
-df_ate_95 = df[df['level'] == level][display_columns]
-make_pretty(df_ate_95, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_pq1,
+ filters={"level": 0.95},
+ display_cols=display_columns_pq,
+ n_rep=n_rep_pq1,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
```
```{python}
#| echo: false
-level = 0.9
-df_ate_9 = df[df['level'] == level][display_columns]
-make_pretty(df_ate_9, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_pq1,
+ filters={"level": 0.9},
+ display_cols=display_columns_pq,
+ n_rep=n_rep_pq1,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
```
## LQTE
-```{python}
-#| echo: false
-
-def highlight_range(s, level=0.95, dist=0.05, props=''):
- color_grid = np.where((s >= level-dist) &
- (s <= level+dist), props, '')
- return color_grid
-
-
-def color_coverage(df, level):
- # color coverage column order is important
- styled_df = df.apply(
- highlight_range,
- level=level,
- dist=1.0,
- props='color:black;background-color:red',
- subset=["Coverage", "Uniform Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.1,
- props='color:black;background-color:yellow',
- subset=["Coverage", "Uniform Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.05,
- props='color:white;background-color:darkgreen',
- subset=["Coverage", "Uniform Coverage"])
-
- # set all coverage values to bold
- styled_df = styled_df.set_properties(
- **{'font-weight': 'bold'},
- subset=["Coverage", "Uniform Coverage"])
- return styled_df
-
-
-def make_pretty(df, level, n_rep):
- styled_df = df.style.hide(axis="index")
- # Format only float columns
- float_cols = df.select_dtypes(include=['float']).columns
- styled_df = styled_df.format({col: "{:.3f}" for col in float_cols})
-
- # color coverage column order is important
- styled_df = color_coverage(styled_df, level)
- caption = f"Coverage for {level*100}%-Confidence Interval over {n_rep} Repetitions"
-
- return show(styled_df, caption=caption, allow_html=True)
-```
-
-The results are based on a location-scale model as described the corresponding [Example](https://docs.doubleml.org/stable/examples/py_double_ml_pq.html#Local-Potential-Quantiles-(LPQs)) with $10,000$ observations.
+The results are based on a location-scale model as described in the corresponding [Example](https://docs.doubleml.org/stable/examples/py_double_ml_pq.html#Local-Potential-Quantiles-(LPQs)) with $5,000$ observations.
-The non-uniform results (coverage, ci length and bias) refer to averaged values over all quantiles (point-wise confidende intervals).
+The non-uniform results (coverage, CI length and bias) refer to averaged values over all quantiles (point-wise confidence intervals).
::: {.callout-note title="Metadata" collapse="true"}
```{python}
#| echo: false
-metadata_file = '../../results/irm/lpq_coverage_metadata.csv'
+metadata_file = '../../results/irm/lpq_metadata.csv'
metadata_df = pd.read_csv(metadata_file)
print(metadata_df.T.to_string(header=False))
```
@@ -280,105 +179,81 @@ print(metadata_df.T.to_string(header=False))
```{python}
#| echo: false
-# set up data and rename columns
-df = pd.read_csv("../../results/irm/lpq_coverage_lqte.csv", index_col=None)
+# set up data
+df_lqte = pd.read_csv("../../results/irm/lpq_effect_coverage.csv", index_col=None)
-assert df["repetition"].nunique() == 1
-n_rep = df["repetition"].unique()[0]
+assert df_lqte["repetition"].nunique() == 1
+n_rep_lqte = df_lqte["repetition"].unique()[0]
-display_columns = ["Learner g", "Learner m", "Bias", "CI Length", "Coverage", "Uniform CI Length", "Uniform Coverage"]
+display_columns_lqte = ["Learner g", "Learner m", "Bias", "CI Length", "Coverage", "Uniform CI Length", "Uniform Coverage"]
```
```{python}
#| echo: false
-level = 0.95
-df_ate_95 = df[df['level'] == level][display_columns]
-make_pretty(df_ate_95, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_lqte,
+ filters={"level": 0.95},
+ display_cols=display_columns_lqte,
+ n_rep=n_rep_lqte,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
```
```{python}
#| echo: false
-level = 0.9
-df_ate_9 = df[df['level'] == level][display_columns]
-make_pretty(df_ate_9, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_lqte,
+ filters={"level": 0.9},
+ display_cols=display_columns_lqte,
+ n_rep=n_rep_lqte,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
```
## Local Potential Quantiles
-```{python}
-#| echo: false
-
-def color_coverage(df, level):
- # color coverage column order is important
- styled_df = df.apply(
- highlight_range,
- level=level,
- dist=1.0,
- props='color:black;background-color:red',
- subset=["Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.1,
- props='color:black;background-color:yellow',
- subset=["Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.05,
- props='color:white;background-color:darkgreen',
- subset=["Coverage"])
-
- # set all coverage values to bold
- styled_df = styled_df.set_properties(
- **{'font-weight': 'bold'},
- subset=["Coverage"])
- return styled_df
-
-
-def make_pretty(df, level, n_rep):
- styled_df = df.style.hide(axis="index")
- # Format only float columns
- float_cols = df.select_dtypes(include=['float']).columns
- styled_df = styled_df.format({col: "{:.3f}" for col in float_cols})
-
- # color coverage column order is important
- styled_df = color_coverage(styled_df, level)
- caption = f"Coverage for {level*100}%-Confidence Interval over {n_rep} Repetitions"
-
- return show(styled_df, caption=caption, allow_html=True)
-```
-
### Local Y(0) - Quantile
```{python}
#| echo: false
-# set up data and rename columns
-df = pd.read_csv("../../results/irm/lpq_coverage_lpq0.csv", index_col=None)
+# set up data
+df_lpq0 = pd.read_csv("../../results/irm/lpq_Y0_coverage.csv", index_col=None)
-assert df["repetition"].nunique() == 1
-n_rep = df["repetition"].unique()[0]
+assert df_lpq0["repetition"].nunique() == 1
+n_rep_lpq0 = df_lpq0["repetition"].unique()[0]
-display_columns = ["Learner g", "Learner m", "Bias", "CI Length", "Coverage"]
+display_columns_lpq = ["Learner g", "Learner m", "Bias", "CI Length", "Coverage"]
```
```{python}
#| echo: false
-level = 0.95
-df_ate_95 = df[df['level'] == level][display_columns]
-make_pretty(df_ate_95, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_lpq0,
+ filters={"level": 0.95},
+ display_cols=display_columns_lpq,
+ n_rep=n_rep_lpq0,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
```
```{python}
#| echo: false
-level = 0.9
-df_ate_9 = df[df['level'] == level][display_columns]
-make_pretty(df_ate_9, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_lpq0,
+ filters={"level": 0.9},
+ display_cols=display_columns_lpq,
+ n_rep=n_rep_lpq0,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
```
### Local Y(1) - Quantile
@@ -386,92 +261,52 @@ make_pretty(df_ate_9, level, n_rep)
```{python}
#| echo: false
-# set up data and rename columns
-df = pd.read_csv("../../results/irm/lpq_coverage_lpq1.csv", index_col=None)
+# set up data
+df_lpq1 = pd.read_csv("../../results/irm/lpq_Y1_coverage.csv", index_col=None)
-assert df["repetition"].nunique() == 1
-n_rep = df["repetition"].unique()[0]
+assert df_lpq1["repetition"].nunique() == 1
+n_rep_lpq1 = df_lpq1["repetition"].unique()[0]
-display_columns = ["Learner g", "Learner m", "Bias", "CI Length", "Coverage"]
+# display_columns_lpq is the same as for Local Y(0)
```
```{python}
#| echo: false
-level = 0.95
-df_ate_95 = df[df['level'] == level][display_columns]
-make_pretty(df_ate_95, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_lpq1,
+ filters={"level": 0.95},
+ display_cols=display_columns_lpq,
+ n_rep=n_rep_lpq1,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
```
```{python}
#| echo: false
-level = 0.9
-df_ate_9 = df[df['level'] == level][display_columns]
-make_pretty(df_ate_9, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_lpq1,
+ filters={"level": 0.9},
+ display_cols=display_columns_lpq,
+ n_rep=n_rep_lpq1,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
```
## CVaR Effects
-```{python}
-#| echo: false
-
-def highlight_range(s, level=0.95, dist=0.05, props=''):
- color_grid = np.where((s >= level-dist) &
- (s <= level+dist), props, '')
- return color_grid
-
-
-def color_coverage(df, level):
- # color coverage column order is important
- styled_df = df.apply(
- highlight_range,
- level=level,
- dist=1.0,
- props='color:black;background-color:red',
- subset=["Coverage", "Uniform Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.1,
- props='color:black;background-color:yellow',
- subset=["Coverage", "Uniform Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.05,
- props='color:white;background-color:darkgreen',
- subset=["Coverage", "Uniform Coverage"])
-
- # set all coverage values to bold
- styled_df = styled_df.set_properties(
- **{'font-weight': 'bold'},
- subset=["Coverage", "Uniform Coverage"])
- return styled_df
-
-
-def make_pretty(df, level, n_rep):
- styled_df = df.style.hide(axis="index")
- # Format only float columns
- float_cols = df.select_dtypes(include=['float']).columns
- styled_df = styled_df.format({col: "{:.3f}" for col in float_cols})
-
- # color coverage column order is important
- styled_df = color_coverage(styled_df, level)
- caption = f"Coverage for {level*100}%-Confidence Interval over {n_rep} Repetitions"
-
- return show(styled_df, caption=caption, allow_html=True)
-```
-
The results are based on a location-scale model as described in the corresponding [Example](https://docs.doubleml.org/stable/examples/py_double_ml_cvar.html) with $5,000$ observations. Remark that the process is not linear.
-The non-uniform results (coverage, ci length and bias) refer to averaged values over all quantiles (point-wise confidende intervals).
+The non-uniform results (coverage, CI length and bias) refer to averaged values over all quantiles (point-wise confidence intervals).
::: {.callout-note title="Metadata" collapse="true"}
```{python}
#| echo: false
-metadata_file = '../../results/irm/cvar_coverage_metadata.csv'
+metadata_file = '../../results/irm/cvar_metadata.csv'
metadata_df = pd.read_csv(metadata_file)
print(metadata_df.T.to_string(header=False))
```
@@ -481,105 +316,81 @@ print(metadata_df.T.to_string(header=False))
```{python}
#| echo: false
-# set up data and rename columns
-df = pd.read_csv("../../results/irm/cvar_coverage_qte.csv", index_col=None)
+# set up data
+df_cvar_qte = pd.read_csv("../../results/irm/cvar_effect_coverage.csv", index_col=None)
-assert df["repetition"].nunique() == 1
-n_rep = df["repetition"].unique()[0]
+assert df_cvar_qte["repetition"].nunique() == 1
+n_rep_cvar_qte = df_cvar_qte["repetition"].unique()[0]
-display_columns = ["Learner g", "Learner m", "Bias", "CI Length", "Coverage", "Uniform CI Length", "Uniform Coverage"]
+display_columns_cvar_qte = ["Learner g", "Learner m", "Bias", "CI Length", "Coverage", "Uniform CI Length", "Uniform Coverage"]
```
```{python}
#| echo: false
-level = 0.95
-df_ate_95 = df[df['level'] == level][display_columns]
-make_pretty(df_ate_95, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_cvar_qte,
+ filters={"level": 0.95},
+ display_cols=display_columns_cvar_qte,
+ n_rep=n_rep_cvar_qte,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
```
```{python}
#| echo: false
-level = 0.9
-df_ate_9 = df[df['level'] == level][display_columns]
-make_pretty(df_ate_9, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_cvar_qte,
+ filters={"level": 0.9},
+ display_cols=display_columns_cvar_qte,
+ n_rep=n_rep_cvar_qte,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
```
## CVaR Potential Quantiles
-```{python}
-#| echo: false
-
-def color_coverage(df, level):
- # color coverage column order is important
- styled_df = df.apply(
- highlight_range,
- level=level,
- dist=1.0,
- props='color:black;background-color:red',
- subset=["Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.1,
- props='color:black;background-color:yellow',
- subset=["Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.05,
- props='color:white;background-color:darkgreen',
- subset=["Coverage"])
-
- # set all coverage values to bold
- styled_df = styled_df.set_properties(
- **{'font-weight': 'bold'},
- subset=["Coverage"])
- return styled_df
-
-
-def make_pretty(df, level, n_rep):
- styled_df = df.style.hide(axis="index")
- # Format only float columns
- float_cols = df.select_dtypes(include=['float']).columns
- styled_df = styled_df.format({col: "{:.3f}" for col in float_cols})
-
- # color coverage column order is important
- styled_df = color_coverage(styled_df, level)
- caption = f"Coverage for {level*100}%-Confidence Interval over {n_rep} Repetitions"
-
- return show(styled_df, caption=caption, allow_html=True)
-```
-
### CVaR Y(0)
```{python}
#| echo: false
-# set up data and rename columns
-df = pd.read_csv("../../results/irm/cvar_coverage_pq0.csv", index_col=None)
+# set up data
+df_cvar_pq0 = pd.read_csv("../../results/irm/cvar_Y0_coverage.csv", index_col=None)
-assert df["repetition"].nunique() == 1
-n_rep = df["repetition"].unique()[0]
+assert df_cvar_pq0["repetition"].nunique() == 1
+n_rep_cvar_pq0 = df_cvar_pq0["repetition"].unique()[0]
-display_columns = ["Learner g", "Learner m", "Bias", "CI Length", "Coverage"]
+display_columns_cvar_pq = ["Learner g", "Learner m", "Bias", "CI Length", "Coverage"]
```
```{python}
#| echo: false
-level = 0.95
-df_ate_95 = df[df['level'] == level][display_columns]
-make_pretty(df_ate_95, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_cvar_pq0,
+ filters={"level": 0.95},
+ display_cols=display_columns_cvar_pq,
+ n_rep=n_rep_cvar_pq0,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
```
```{python}
#| echo: false
-level = 0.9
-df_ate_9 = df[df['level'] == level][display_columns]
-make_pretty(df_ate_9, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_cvar_pq0,
+ filters={"level": 0.9},
+ display_cols=display_columns_cvar_pq,
+ n_rep=n_rep_cvar_pq0,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
```
### CVaR Y(1)
@@ -587,27 +398,37 @@ make_pretty(df_ate_9, level, n_rep)
```{python}
#| echo: false
-# set up data and rename columns
-df = pd.read_csv("../../results/irm/cvar_coverage_pq1.csv", index_col=None)
+# set up data
+df_cvar_pq1 = pd.read_csv("../../results/irm/cvar_Y1_coverage.csv", index_col=None)
-assert df["repetition"].nunique() == 1
-n_rep = df["repetition"].unique()[0]
+assert df_cvar_pq1["repetition"].nunique() == 1
+n_rep_cvar_pq1 = df_cvar_pq1["repetition"].unique()[0]
-display_columns = ["Learner g", "Learner m", "Bias", "CI Length", "Coverage"]
+# display_columns_cvar_pq is the same as for CVaR Y(0)
```
```{python}
#| echo: false
-level = 0.95
-df_ate_95 = df[df['level'] == level][display_columns]
-make_pretty(df_ate_95, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_cvar_pq1,
+ filters={"level": 0.95},
+ display_cols=display_columns_cvar_pq,
+ n_rep=n_rep_cvar_pq1,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
```
```{python}
#| echo: false
-level = 0.9
-df_ate_9 = df[df['level'] == level][display_columns]
-make_pretty(df_ate_9, level, n_rep)
-```
\ No newline at end of file
+generate_and_show_styled_table(
+ main_df=df_cvar_pq1,
+ filters={"level": 0.9},
+ display_cols=display_columns_cvar_pq,
+ n_rep=n_rep_cvar_pq1,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
+```
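
The QTE, LQTE, and CVaR sections above all start from a location-scale model. A minimal sketch of that model class follows; this is an assumed generic form for illustration, not the DGP from the linked examples.

```python
# Generic location-scale DGP sketch: the treatment shifts both the location
# and the scale of the outcome, so quantile effects vary across quantiles.
import numpy as np

rng = np.random.default_rng(0)
n = 5000
x = rng.uniform(0, 1, size=n)
d = rng.binomial(1, 0.5, size=n)              # illustrative random treatment
eps = rng.normal(size=n)
y = 1.0 * d + x + (1.0 + 0.5 * d + x) * eps   # location and scale depend on d
# Because of the scale term, the tau-quantile of Y(1) - Y(0) is not constant
# in tau, which is what the quantile-level coverage tables probe.
```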
diff --git a/doc/plm/pliv.qmd b/doc/plm/pliv.qmd
index f8319ec..eb3b455 100644
--- a/doc/plm/pliv.qmd
+++ b/doc/plm/pliv.qmd
@@ -9,55 +9,17 @@ jupyter: python3
import numpy as np
import pandas as pd
-from itables import init_notebook_mode, show, options
+from itables import init_notebook_mode
+import os
+import sys
-init_notebook_mode(all_interactive=True)
+doc_dir = os.path.abspath(os.path.join(os.getcwd(), ".."))
+if doc_dir not in sys.path:
+ sys.path.append(doc_dir)
+
+from utils.style_tables import generate_and_show_styled_table
-def highlight_range(s, level=0.95, dist=0.05, props=''):
- color_grid = np.where((s >= level-dist) &
- (s <= level+dist), props, '')
- return color_grid
-
-
-def color_coverage(df, level):
- # color coverage column order is important
- styled_df = df.apply(
- highlight_range,
- level=level,
- dist=1.0,
- props='color:black;background-color:red',
- subset=["Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.1,
- props='color:black;background-color:yellow',
- subset=["Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.05,
- props='color:white;background-color:darkgreen',
- subset=["Coverage"])
-
- # set all coverage values to bold
- styled_df = styled_df.set_properties(
- **{'font-weight': 'bold'},
- subset=["Coverage"])
- return styled_df
-
-
-def make_pretty(df, level, n_rep):
- styled_df = df.style.hide(axis="index")
- # Format only float columns
- float_cols = df.select_dtypes(include=['float']).columns
- styled_df = styled_df.format({col: "{:.3f}" for col in float_cols})
-
- # color coverage column order is important
- styled_df = color_coverage(styled_df, level)
- caption = f"Coverage for {level*100}%-Confidence Interval over {n_rep} Repetitions"
-
- return show(styled_df, caption=caption, allow_html=True)
+init_notebook_mode(all_interactive=True)
```
## LATE Coverage
@@ -68,7 +30,7 @@ The simulations are based on the the [make_pliv_CHS2015](https://docs.doubleml.
```{python}
#| echo: false
-metadata_file = '../../results/plm/pliv_late_coverage_metadata.csv'
+metadata_file = '../../results/plm/pliv_late_metadata.csv'
metadata_df = pd.read_csv(metadata_file)
print(metadata_df.T.to_string(header=False))
```
@@ -79,34 +41,46 @@ print(metadata_df.T.to_string(header=False))
#| echo: false
# set up data and rename columns
-df = pd.read_csv("../../results/plm/pliv_late_coverage.csv", index_col=None)
+df_coverage_pliv = pd.read_csv("../../results/plm/pliv_late_coverage.csv", index_col=None)
-assert df["repetition"].nunique() == 1
-n_rep = df["repetition"].unique()[0]
+if "repetition" in df_coverage_pliv.columns and df_coverage_pliv["repetition"].nunique() == 1:
+ n_rep_pliv = df_coverage_pliv["repetition"].unique()[0]
+elif "n_rep" in df_coverage_pliv.columns and df_coverage_pliv["n_rep"].nunique() == 1:
+ n_rep_pliv = df_coverage_pliv["n_rep"].unique()[0]
+else:
+ n_rep_pliv = "N/A"
-display_columns = ["Learner g", "Learner m", "Learner r", "Bias", "CI Length", "Coverage"]
+display_columns_pliv = ["Learner g", "Learner m", "Learner r", "Bias", "CI Length", "Coverage"]
```
### Partialling out
```{python}
#| echo: false
-score = "partialling out"
-level = 0.95
-df_ate_95 = df[(df['level'] == level) & (df["score"] == score)][display_columns]
-df_ate_95.rename(columns={"Learner g": "Learner l"}, inplace=True)
-make_pretty(df_ate_95, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_coverage_pliv,
+ filters={"level": 0.95, "Score": "partialling out"},
+ display_cols=display_columns_pliv,
+ n_rep=n_rep_pliv,
+ level_col="level",
+ rename_map={"Learner g": "Learner l"},
+ coverage_highlight_cols=["Coverage"]
+)
```
```{python}
#| echo: false
-score = "partialling out"
-level = 0.9
-df_ate_9 = df[(df['level'] == level) & (df["score"] == score)][display_columns]
-df_ate_9.rename(columns={"Learner g": "Learner l"}, inplace=True)
-make_pretty(df_ate_9, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_coverage_pliv,
+ filters={"level": 0.90, "Score": "partialling out"},
+ display_cols=display_columns_pliv,
+ n_rep=n_rep_pliv,
+ level_col="level",
+ rename_map={"Learner g": "Learner l"},
+ coverage_highlight_cols=["Coverage"]
+)
```
### IV-type
@@ -115,18 +89,26 @@ For the IV-type score, the learners `ml_l` and `ml_g` are both set to the same t
```{python}
#| echo: false
-score = "IV-type"
-level = 0.95
-df_ate_95 = df[(df['level'] == level) & (df["score"] == score)][display_columns]
-make_pretty(df_ate_95, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_coverage_pliv,
+ filters={"level": 0.95, "Score": "IV-type"},
+ display_cols=display_columns_pliv,
+ n_rep=n_rep_pliv,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
```
```{python}
#| echo: false
-score = "IV-type"
-level = 0.9
-df_ate_9 = df[(df['level'] == level) & (df["score"] == score)][display_columns]
-make_pretty(df_ate_9, level, n_rep)
-```
\ No newline at end of file
+generate_and_show_styled_table(
+ main_df=df_coverage_pliv,
+ filters={"level": 0.9, "Score": "IV-type"},
+ display_cols=display_columns_pliv,
+ n_rep=n_rep_pliv,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
+```
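
`pliv.qmd` above (and `plr.qmd` below) guard the repetition lookup with an if/elif/else chain so rendering survives a results-schema change. A compact equivalent, shown only as a sketch (the helper name is made up):

```python
# Sketch of the repetition-count fallback used in pliv.qmd/plr.qmd.
import pandas as pd

def infer_n_rep(df: pd.DataFrame):
    # Prefer 'repetition', fall back to 'n_rep'; require a unique value.
    for col in ("repetition", "n_rep"):
        if col in df.columns and df[col].nunique() == 1:
            return df[col].unique()[0]
    return "N/A"  # displayed in the table caption if undetermined
```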
diff --git a/doc/plm/plr.qmd b/doc/plm/plr.qmd
index d49c2a8..f9e9304 100644
--- a/doc/plm/plr.qmd
+++ b/doc/plm/plr.qmd
@@ -9,55 +9,17 @@ jupyter: python3
import numpy as np
import pandas as pd
-from itables import init_notebook_mode, show, options
+from itables import init_notebook_mode
+import os
+import sys
-init_notebook_mode(all_interactive=True)
+doc_dir = os.path.abspath(os.path.join(os.getcwd(), ".."))
+if doc_dir not in sys.path:
+ sys.path.append(doc_dir)
+
+from utils.style_tables import generate_and_show_styled_table
-def highlight_range(s, level=0.95, dist=0.05, props=''):
- color_grid = np.where((s >= level-dist) &
- (s <= level+dist), props, '')
- return color_grid
-
-
-def color_coverage(df, level):
- # color coverage column order is important
- styled_df = df.apply(
- highlight_range,
- level=level,
- dist=1.0,
- props='color:black;background-color:red',
- subset=["Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.1,
- props='color:black;background-color:yellow',
- subset=["Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.05,
- props='color:white;background-color:darkgreen',
- subset=["Coverage"])
-
- # set all coverage values to bold
- styled_df = styled_df.set_properties(
- **{'font-weight': 'bold'},
- subset=["Coverage"])
- return styled_df
-
-
-def make_pretty(df, level, n_rep):
- styled_df = df.style.hide(axis="index")
- # Format only float columns
- float_cols = df.select_dtypes(include=['float']).columns
- styled_df = styled_df.format({col: "{:.3f}" for col in float_cols})
-
- # color coverage column order is important
- styled_df = color_coverage(styled_df, level)
- caption = f"Coverage for {level*100}%-Confidence Interval over {n_rep} Repetitions"
-
- return show(styled_df, caption=caption, allow_html=True)
+init_notebook_mode(all_interactive=True)
```
## ATE Coverage
@@ -68,7 +30,7 @@ The simulations are based on the the [make_plr_CCDDHNR2018](https://docs.double
```{python}
#| echo: false
-metadata_file = '../../results/plm/plr_ate_coverage_metadata.csv'
+metadata_file = '../../results/plm/plr_ate_metadata.csv'
metadata_df = pd.read_csv(metadata_file)
print(metadata_df.T.to_string(header=False))
```
@@ -79,34 +41,46 @@ print(metadata_df.T.to_string(header=False))
#| echo: false
# set up data and rename columns
-df = pd.read_csv("../../results/plm/plr_ate_coverage.csv", index_col=None)
+df_coverage = pd.read_csv("../../results/plm/plr_ate_coverage.csv", index_col=None)
-assert df["repetition"].nunique() == 1
-n_rep = df["repetition"].unique()[0]
+if "repetition" in df_coverage.columns and df_coverage["repetition"].nunique() == 1:
+ n_rep_coverage = df_coverage["repetition"].unique()[0]
+elif "n_rep" in df_coverage.columns and df_coverage["n_rep"].nunique() == 1:
+ n_rep_coverage = df_coverage["n_rep"].unique()[0]
+else:
+ n_rep_coverage = "N/A" # Fallback if n_rep cannot be determined
-display_columns = ["Learner g", "Learner m", "Bias", "CI Length", "Coverage"]
+display_columns_coverage = ["Learner g", "Learner m", "Bias", "CI Length", "Coverage"]
```
### Partialling out
```{python}
# | echo: false
-score = "partialling out"
-level = 0.95
-df_ate_95 = df[(df["level"] == level) & (df["score"] == score)][display_columns]
-df_ate_95.rename(columns={"Learner g": "Learner l"}, inplace=True)
-make_pretty(df_ate_95, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_coverage,
+ filters={"level": 0.95, "Score": "partialling out"},
+ display_cols=display_columns_coverage,
+ n_rep=n_rep_coverage,
+ level_col="level",
+ rename_map={"Learner g": "Learner l"},
+ coverage_highlight_cols=["Coverage"]
+)
```
```{python}
#| echo: false
-score = "partialling out"
-level = 0.9
-df_ate_9 = df[(df['level'] == level) & (df["score"] == score)][display_columns]
-df_ate_9.rename(columns={"Learner g": "Learner l"}, inplace=True)
-make_pretty(df_ate_9, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_coverage,
+ filters={"level": 0.9, "Score": "partialling out"},
+ display_cols=display_columns_coverage,
+ n_rep=n_rep_coverage,
+ level_col="level",
+ rename_map={"Learner g": "Learner l"},
+ coverage_highlight_cols=["Coverage"]
+)
```
### IV-type
@@ -115,20 +89,28 @@ For the IV-type score, the learners `ml_l` and `ml_g` are both set to the same t
```{python}
#| echo: false
-score = "IV-type"
-level = 0.95
-df_ate_95 = df[(df['level'] == level) & (df["score"] == score)][display_columns]
-make_pretty(df_ate_95, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_coverage,
+ filters={"level": 0.95, "Score": "IV-type"},
+ display_cols=display_columns_coverage,
+ n_rep=n_rep_coverage,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
```
```{python}
#| echo: false
-score = "IV-type"
-level = 0.9
-df_ate_9 = df[(df['level'] == level) & (df["score"] == score)][display_columns]
-make_pretty(df_ate_9, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_coverage,
+ filters={"level": 0.9, "Score": "IV-type"},
+ display_cols=display_columns_coverage,
+ n_rep=n_rep_coverage,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
```
## ATE Sensitivity
@@ -142,9 +124,9 @@ Further, the corresponding confidence intervals are one-sided (since the directi
```{python}
#| echo: false
-metadata_file = '../../results/plm/plr_ate_sensitivity_metadata.csv'
-metadata_df = pd.read_csv(metadata_file)
-print(metadata_df.T.to_string(header=False))
+metadata_file_sens = '../../results/plm/plr_ate_sensitivity_metadata.csv'
+metadata_df_sens = pd.read_csv(metadata_file_sens)
+print(metadata_df_sens.T.to_string(header=False))
```
:::
@@ -153,91 +135,48 @@ print(metadata_df.T.to_string(header=False))
#| echo: false
# set up data and rename columns
-df = pd.read_csv("../../results/plm/plr_ate_sensitivity.csv", index_col=None)
+df_sensitivity = pd.read_csv("../../results/plm/plr_ate_sensitivity_coverage.csv", index_col=None)
-assert df["repetition"].nunique() == 1
-n_rep = df["repetition"].unique()[0]
+if "repetition" in df_sensitivity.columns and df_sensitivity["repetition"].nunique() == 1:
+ n_rep_sensitivity = df_sensitivity["repetition"].unique()[0]
+elif "n_rep" in df_sensitivity.columns and df_sensitivity["n_rep"].nunique() == 1:
+ n_rep_sensitivity = df_sensitivity["n_rep"].unique()[0]
+else:
+ n_rep_sensitivity = "N/A"
-display_columns = [
+display_columns_sensitivity = [
"Learner g", "Learner m", "Bias", "Bias (Lower)", "Bias (Upper)", "Coverage", "Coverage (Lower)", "Coverage (Upper)", "RV", "RVa"]
```
-```{python}
-#| echo: false
-
-import numpy as np
-import pandas as pd
-from itables import init_notebook_mode, show, options
-
-init_notebook_mode(all_interactive=True)
-
-def highlight_range(s, level=0.95, dist=0.05, props=''):
- color_grid = np.where((s >= level-dist) &
- (s <= level+dist), props, '')
- return color_grid
-
-
-def color_coverage(df, level):
- # color coverage column order is important
- styled_df = df.apply(
- highlight_range,
- level=level,
- dist=1.0,
- props='color:black;background-color:red',
- subset=["Coverage", "Coverage (Upper)"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.1,
- props='color:black;background-color:yellow',
- subset=["Coverage", "Coverage (Upper)"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.05,
- props='color:white;background-color:darkgreen',
- subset=["Coverage", "Coverage (Upper)"])
-
- # set all coverage values to bold
- styled_df = styled_df.set_properties(
- **{'font-weight': 'bold'},
- subset=["Coverage", "Coverage (Upper)"])
- return styled_df
-
-
-def make_pretty(df, level, n_rep):
- styled_df = df.style.hide(axis="index")
- # Format only float columns
- float_cols = df.select_dtypes(include=['float']).columns
- styled_df = styled_df.format({col: "{:.3f}" for col in float_cols})
-
- # color coverage column order is important
- styled_df = color_coverage(styled_df, level)
- caption = f"Coverage for {level*100}%-Confidence Interval over {n_rep} Repetitions"
-
- return show(styled_df, caption=caption, allow_html=True)
-```
### Partialling out
```{python}
#| echo: false
-score = "partialling out"
-level = 0.95
-df_ate_95 = df[(df['level'] == level) & (df["score"] == score)][display_columns]
-df_ate_95.rename(columns={"Learner g": "Learner l"}, inplace=True)
-make_pretty(df_ate_95, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_sensitivity,
+ filters={"level": 0.95, "Score": "partialling out"},
+ display_cols=display_columns_sensitivity,
+ n_rep=n_rep_sensitivity,
+ level_col="level",
+ rename_map={"Learner g": "Learner l"},
+ coverage_highlight_cols=["Coverage", "Coverage (Upper)"]
+)
```
```{python}
#| echo: false
-score = "partialling out"
-level = 0.9
-
-df_ate_9 = df[(df['level'] == level) & (df["score"] == score)][display_columns]
-df_ate_9.rename(columns={"Learner g": "Learner l"}, inplace=True)
-make_pretty(df_ate_9, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_sensitivity,
+ filters={"level": 0.9, "Score": "partialling out"},
+ display_cols=display_columns_sensitivity,
+ n_rep=n_rep_sensitivity,
+ level_col="level",
+ rename_map={"Learner g": "Learner l"},
+ coverage_highlight_cols=["Coverage", "Coverage (Upper)"]
+)
```
### IV-type
@@ -246,18 +185,26 @@ For the IV-type score, the learners `ml_l` and `ml_g` are both set to the same t
```{python}
#| echo: false
-score = "IV-type"
-level = 0.95
-df_ate_95 = df[(df['level'] == level) & (df["score"] == score)][display_columns]
-make_pretty(df_ate_95, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_sensitivity,
+ filters={"level": 0.95, "Score": "IV-type"},
+ display_cols=display_columns_sensitivity,
+ n_rep=n_rep_sensitivity,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Coverage (Upper)"]
+)
```
```{python}
#| echo: false
-score = "IV-type"
-level = 0.9
-df_ate_9 = df[(df['level'] == level) & (df["score"] == score)][display_columns]
-make_pretty(df_ate_9, level, n_rep)
-```
\ No newline at end of file
+generate_and_show_styled_table(
+ main_df=df_sensitivity,
+ filters={"level": 0.9, "Score": "IV-type"},
+ display_cols=display_columns_sensitivity,
+ n_rep=n_rep_sensitivity,
+ level_col="level",
+ coverage_highlight_cols=["Coverage", "Coverage (Upper)"]
+)
+```
diff --git a/doc/plm/plr_cate.qmd b/doc/plm/plr_cate.qmd
index 11ea2fb..1581025 100644
--- a/doc/plm/plr_cate.qmd
+++ b/doc/plm/plr_cate.qmd
@@ -9,68 +9,30 @@ jupyter: python3
import numpy as np
import pandas as pd
-from itables import init_notebook_mode, show, options
+from itables import init_notebook_mode
+import os
+import sys
-init_notebook_mode(all_interactive=True)
+doc_dir = os.path.abspath(os.path.join(os.getcwd(), ".."))
+if doc_dir not in sys.path:
+ sys.path.append(doc_dir)
+
+from utils.style_tables import generate_and_show_styled_table
-def highlight_range(s, level=0.95, dist=0.05, props=''):
- color_grid = np.where((s >= level-dist) &
- (s <= level+dist), props, '')
- return color_grid
-
-
-def color_coverage(df, level):
- # color coverage column order is important
- styled_df = df.apply(
- highlight_range,
- level=level,
- dist=1.0,
- props='color:black;background-color:red',
- subset=["Coverage", "Uniform Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.1,
- props='color:black;background-color:yellow',
- subset=["Coverage", "Uniform Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.05,
- props='color:white;background-color:darkgreen',
- subset=["Coverage", "Uniform Coverage"])
-
- # set all coverage values to bold
- styled_df = styled_df.set_properties(
- **{'font-weight': 'bold'},
- subset=["Coverage", "Uniform Coverage"])
- return styled_df
-
-
-def make_pretty(df, level, n_rep):
- styled_df = df.style.hide(axis="index")
- # Format only float columns
- float_cols = df.select_dtypes(include=['float']).columns
- styled_df = styled_df.format({col: "{:.3f}" for col in float_cols})
-
- # color coverage column order is important
- styled_df = color_coverage(styled_df, level)
- caption = f"Coverage for {level*100}%-Confidence Interval over {n_rep} Repetitions"
-
- return show(styled_df, caption=caption, allow_html=True)
+init_notebook_mode(all_interactive=True)
```
## CATE Coverage
The simulations are based on the [make_heterogeneous_data](https://docs.doubleml.org/stable/api/generated/doubleml.datasets.make_heterogeneous_data.html)-DGP with $2000$ observations. The groups are defined based on the first covariate, analogously to the [CATE PLR Example](https://docs.doubleml.org/stable/examples/py_double_ml_cate_plr.html), but rely on [LightGBM](https://lightgbm.readthedocs.io/en/latest/index.html) to estimate nuisance elements (due to time constraints).
-The non-uniform results (coverage, ci length and bias) refer to averaged values over all groups (point-wise confidende intervals).
+The non-uniform results (coverage, CI length and bias) refer to averaged values over all groups (point-wise confidence intervals).
::: {.callout-note title="Metadata" collapse="true"}
```{python}
#| echo: false
-metadata_file = '../../results/plm/plr_cate_coverage_metadata.csv'
+metadata_file = '../../results/plm/plr_cate_metadata.csv'
metadata_df = pd.read_csv(metadata_file)
print(metadata_df.T.to_string(header=False))
```
@@ -81,28 +43,77 @@ print(metadata_df.T.to_string(header=False))
#| echo: false
# set up data and rename columns
-df = pd.read_csv("../../results/plm/plr_cate_coverage.csv", index_col=None)
+df_cate = pd.read_csv("../../results/plm/plr_cate_coverage.csv", index_col=None)
+
+# Determine the number of repetitions from either the "repetition" or "n_rep" column
+if "repetition" in df_cate.columns and df_cate["repetition"].nunique() == 1:
+ n_rep_cate = df_cate["repetition"].unique()[0]
+elif "n_rep" in df_cate.columns and df_cate["n_rep"].nunique() == 1:
+ n_rep_cate = df_cate["n_rep"].unique()[0]
+else:
+ n_rep_cate = "N/A"
+
+
+display_columns_cate = ["Learner g", "Learner m", "Bias", "CI Length", "Coverage", "Uniform CI Length", "Uniform Coverage"]
+```
+
+### Partialling out
+
+```{python}
+#| echo: false
-assert df["repetition"].nunique() == 1
-n_rep = df["repetition"].unique()[0]
+generate_and_show_styled_table(
+ main_df=df_cate,
+ filters={"level": 0.95, "Score": "partialling out"},
+ display_cols=display_columns_cate,
+ n_rep=n_rep_cate,
+ level_col="level",
+ rename_map={"Learner g": "Learner l"},
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
-display_columns = ["Learner g", "Learner m", "Bias", "CI Length", "Coverage", "Uniform CI Length", "Uniform Coverage"]
+```{python}
+#| echo: false
+
+generate_and_show_styled_table(
+ main_df=df_cate,
+ filters={"level": 0.9, "Score": "partialling out"},
+ display_cols=display_columns_cate,
+ n_rep=n_rep_cate,
+ level_col="level",
+ rename_map={"Learner g": "Learner l"},
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
```
+
+### IV-type
+
```{python}
#| echo: false
-level = 0.95
-df_ate_95 = df[df['level'] == level][display_columns]
-df_ate_95.rename(columns={"Learner g": "Learner l"}, inplace=True)
-make_pretty(df_ate_95, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_cate,
+ filters={"level": 0.95, "Score": "IV-type"},
+ display_cols=display_columns_cate,
+ n_rep=n_rep_cate,
+ level_col="level",
+ rename_map={"Learner g": "Learner l"},
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
```
```{python}
#| echo: false
-level = 0.9
-df_ate_9 = df[df['level'] == level][display_columns]
-df_ate_9.rename(columns={"Learner g": "Learner l"}, inplace=True)
-make_pretty(df_ate_9, level, n_rep)
-```
\ No newline at end of file
+generate_and_show_styled_table(
+ main_df=df_cate,
+ filters={"level": 0.9, "Score": "IV-type"},
+ display_cols=display_columns_cate,
+ n_rep=n_rep_cate,
+ level_col="level",
+ rename_map={"Learner g": "Learner l"},
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
diff --git a/doc/plm/plr_gate.qmd b/doc/plm/plr_gate.qmd
index bf4a478..d32bd4e 100644
--- a/doc/plm/plr_gate.qmd
+++ b/doc/plm/plr_gate.qmd
@@ -9,68 +9,30 @@ jupyter: python3
import numpy as np
import pandas as pd
-from itables import init_notebook_mode, show, options
+from itables import init_notebook_mode
+import os
+import sys
-init_notebook_mode(all_interactive=True)
+doc_dir = os.path.abspath(os.path.join(os.getcwd(), ".."))
+if doc_dir not in sys.path:
+ sys.path.append(doc_dir)
+
+from utils.style_tables import generate_and_show_styled_table
-def highlight_range(s, level=0.95, dist=0.05, props=''):
- color_grid = np.where((s >= level-dist) &
- (s <= level+dist), props, '')
- return color_grid
-
-
-def color_coverage(df, level):
- # color coverage column order is important
- styled_df = df.apply(
- highlight_range,
- level=level,
- dist=1.0,
- props='color:black;background-color:red',
- subset=["Coverage", "Uniform Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.1,
- props='color:black;background-color:yellow',
- subset=["Coverage", "Uniform Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.05,
- props='color:white;background-color:darkgreen',
- subset=["Coverage", "Uniform Coverage"])
-
- # set all coverage values to bold
- styled_df = styled_df.set_properties(
- **{'font-weight': 'bold'},
- subset=["Coverage", "Uniform Coverage"])
- return styled_df
-
-
-def make_pretty(df, level, n_rep):
- styled_df = df.style.hide(axis="index")
- # Format only float columns
- float_cols = df.select_dtypes(include=['float']).columns
- styled_df = styled_df.format({col: "{:.3f}" for col in float_cols})
-
- # color coverage column order is important
- styled_df = color_coverage(styled_df, level)
- caption = f"Coverage for {level*100}%-Confidence Interval over {n_rep} Repetitions"
-
- return show(styled_df, caption=caption, allow_html=True)
+init_notebook_mode(all_interactive=True)
```
## GATE Coverage
The simulations are based on the [make_heterogeneous_data](https://docs.doubleml.org/stable/api/generated/doubleml.datasets.make_heterogeneous_data.html)-DGP with $500$ observations. The groups are defined based on the first covariate, analogously to the [GATE PLR Example](https://docs.doubleml.org/stable/examples/py_double_ml_gate_plr.html), but rely on [LightGBM](https://lightgbm.readthedocs.io/en/latest/index.html) to estimate nuisance elements (due to time constraints).
-The non-uniform results (coverage, ci length and bias) refer to averaged values over all groups (point-wise confidende intervals).
+The non-uniform results (coverage, CI length and bias) refer to averaged values over all groups (point-wise confidence intervals).
::: {.callout-note title="Metadata" collapse="true"}
```{python}
#| echo: false
-metadata_file = '../../results/plm/plr_gate_coverage_metadata.csv'
+metadata_file = '../../results/plm/plr_gate_metadata.csv'
metadata_df = pd.read_csv(metadata_file)
print(metadata_df.T.to_string(header=False))
```
@@ -81,28 +43,75 @@ print(metadata_df.T.to_string(header=False))
#| echo: false
# set up data and rename columns
-df = pd.read_csv("../../results/plm/plr_gate_coverage.csv", index_col=None)
+df_gate = pd.read_csv("../../results/plm/plr_gate_coverage.csv", index_col=None)
-assert df["repetition"].nunique() == 1
-n_rep = df["repetition"].unique()[0]
+if "repetition" in df_gate.columns and df_gate["repetition"].nunique() == 1:
+ n_rep_gate = df_gate["repetition"].unique()[0]
+elif "n_rep" in df_gate.columns and df_gate["n_rep"].nunique() == 1: # Check for n_rep as well
+ n_rep_gate = df_gate["n_rep"].unique()[0]
+else:
+ n_rep_gate = "N/A" # Fallback if n_rep cannot be determined
-display_columns = ["Learner g", "Learner m", "Bias", "CI Length", "Coverage", "Uniform CI Length", "Uniform Coverage"]
+
+display_columns_gate = ["Learner g", "Learner m", "Bias", "CI Length", "Coverage", "Uniform CI Length", "Uniform Coverage"]
```
+### Partialling out
+
```{python}
#| echo: false
-level = 0.95
-df_ate_95 = df[df['level'] == level][display_columns]
-df_ate_95.rename(columns={"Learner g": "Learner l"}, inplace=True)
-make_pretty(df_ate_95, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_gate,
+ filters={"level": 0.95, "Score": "partialling out"},
+ display_cols=display_columns_gate,
+ n_rep=n_rep_gate,
+ level_col="level",
+ rename_map={"Learner g": "Learner l"},
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
```
```{python}
#| echo: false
-level = 0.9
-df_ate_9 = df[df['level'] == level][display_columns]
-df_ate_9.rename(columns={"Learner g": "Learner l"}, inplace=True)
-make_pretty(df_ate_9, level, n_rep)
-```
\ No newline at end of file
+generate_and_show_styled_table(
+ main_df=df_gate,
+ filters={"level": 0.9, "Score": "partialling out"},
+ display_cols=display_columns_gate,
+ n_rep=n_rep_gate,
+ level_col="level",
+ rename_map={"Learner g": "Learner l"},
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
+
+### IV-type
+
+```{python}
+#| echo: false
+
+generate_and_show_styled_table(
+ main_df=df_gate,
+ filters={"level": 0.95, "Score": "IV-type"},
+ display_cols=display_columns_gate,
+ n_rep=n_rep_gate,
+ level_col="level",
+ rename_map={"Learner g": "Learner l"},
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
+
+```{python}
+#| echo: false
+
+generate_and_show_styled_table(
+ main_df=df_gate,
+ filters={"level": 0.9, "Score": "IV-type"},
+ display_cols=display_columns_gate,
+ n_rep=n_rep_gate,
+ level_col="level",
+ rename_map={"Learner g": "Learner l"},
+ coverage_highlight_cols=["Coverage", "Uniform Coverage"]
+)
+```
diff --git a/doc/rdd/rdd.qmd b/doc/rdd/rdd.qmd
index 512e48a..31cdddb 100644
--- a/doc/rdd/rdd.qmd
+++ b/doc/rdd/rdd.qmd
@@ -10,55 +10,17 @@ jupyter: python3
import numpy as np
import pandas as pd
-from itables import init_notebook_mode, show, options
+from itables import init_notebook_mode
+import os
+import sys
-init_notebook_mode(all_interactive=True)
+doc_dir = os.path.abspath(os.path.join(os.getcwd(), ".."))
+if doc_dir not in sys.path:
+ sys.path.append(doc_dir)
+
+from utils.style_tables import generate_and_show_styled_table
-def highlight_range(s, level=0.95, dist=0.05, props=''):
- color_grid = np.where((s >= level-dist) &
- (s <= level+dist), props, '')
- return color_grid
-
-
-def color_coverage(df, level):
- # color coverage column order is important
- styled_df = df.apply(
- highlight_range,
- level=level,
- dist=1.0,
- props='color:black;background-color:red',
- subset=["Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.1,
- props='color:black;background-color:yellow',
- subset=["Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.05,
- props='color:white;background-color:darkgreen',
- subset=["Coverage"])
-
- # set all coverage values to bold
- styled_df = styled_df.set_properties(
- **{'font-weight': 'bold'},
- subset=["Coverage"])
- return styled_df
-
-
-def make_pretty(df, level, n_rep):
- styled_df = df.style.hide(axis="index")
- # Format only float columns
- float_cols = df.select_dtypes(include=['float']).columns
- styled_df = styled_df.format({col: "{:.3f}" for col in float_cols})
-
- # color coverage column order is important
- styled_df = color_coverage(styled_df, level)
- caption = f"Coverage for {level*100}%-Confidence Interval over {n_rep} Repetitions"
-
- return show(styled_df, caption=caption, allow_html=True)
+init_notebook_mode(all_interactive=True)
```
## Sharp Design
@@ -69,7 +31,7 @@ The simulations are based on the [make_simple_rdd_data](https://docs.doubleml.or
```{python}
#| echo: false
-metadata_file = '../../results/rdd/rdd_sharp_coverage_metadata.csv'
+metadata_file = '../../results/rdd/rdd_sharp_metadata.csv'
metadata_df = pd.read_csv(metadata_file)
print(metadata_df.T.to_string(header=False))
```
@@ -80,28 +42,36 @@ print(metadata_df.T.to_string(header=False))
# | echo: false
# set up data and rename columns
-df = pd.read_csv("../../results/rdd/rdd_sharp_coverage.csv", index_col=None)
+df_sharp = pd.read_csv("../../results/rdd/rdd_sharp_coverage.csv", index_col=None)
-assert df["repetition"].nunique() == 1
-n_rep = df["repetition"].unique()[0]
+assert df_sharp["repetition"].nunique() == 1
+n_rep_sharp = df_sharp["repetition"].unique()[0]
-display_columns = ["Method", "Learner g", "fs specification", "Bias", "CI Length", "Coverage"]
+display_columns_sharp = ["Method", "Learner g", "fs_specification", "Bias", "CI Length", "Coverage"]
```
```{python}
#| echo: false
-level = 0.95
-
-df_ate_95 = df[(df['level'] == level)][display_columns]
-make_pretty(df_ate_95, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_sharp,
+ filters={"level": 0.95},
+ display_cols=display_columns_sharp,
+ n_rep=n_rep_sharp,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
```
```{python}
#| echo: false
-level = 0.9
-
-df_ate_9 = df[(df['level'] == level)][display_columns]
-make_pretty(df_ate_9, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_sharp,
+ filters={"level": 0.9},
+ display_cols=display_columns_sharp,
+ n_rep=n_rep_sharp,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
```
@@ -113,7 +83,7 @@ The simulations are based on the [make_simple_rdd_data](https://docs.doubleml.or
```{python}
#| echo: false
-metadata_file = '../../results/rdd/rdd_fuzzy_coverage_metadata.csv'
+metadata_file = '../../results/rdd/rdd_fuzzy_metadata.csv'
metadata_df = pd.read_csv(metadata_file)
print(metadata_df.T.to_string(header=False))
```
@@ -124,26 +94,34 @@ print(metadata_df.T.to_string(header=False))
# | echo: false
# set up data and rename columns
-df = pd.read_csv("../../results/rdd/rdd_fuzzy_coverage.csv", index_col=None)
+df_fuzzy = pd.read_csv("../../results/rdd/rdd_fuzzy_coverage.csv", index_col=None)
-assert df["repetition"].nunique() == 1
-n_rep = df["repetition"].unique()[0]
+assert df_fuzzy["repetition"].nunique() == 1
+n_rep_fuzzy = df_fuzzy["repetition"].unique()[0]
-display_columns = ["Method", "Learner g", "Learner m", "fs specification", "Bias", "CI Length", "Coverage"]
+display_columns_fuzzy = ["Method", "Learner g", "Learner m", "fs_specification", "Bias", "CI Length", "Coverage"]
```
```{python}
#| echo: false
-level = 0.95
-
-df_ate_95 = df[(df['level'] == level)][display_columns]
-make_pretty(df_ate_95, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df_fuzzy,
+ filters={"level": 0.95},
+ display_cols=display_columns_fuzzy,
+ n_rep=n_rep_fuzzy,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
```
```{python}
#| echo: false
-level = 0.9
-
-df_ate_9 = df[(df['level'] == level)][display_columns]
-make_pretty(df_ate_9, level, n_rep)
-```
\ No newline at end of file
+generate_and_show_styled_table(
+ main_df=df_fuzzy,
+ filters={"level": 0.9},
+ display_cols=display_columns_fuzzy,
+ n_rep=n_rep_fuzzy,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
+```
diff --git a/doc/requirements-doc.txt b/doc/requirements-doc.txt
index f6afd08..ed281b2 100644
--- a/doc/requirements-doc.txt
+++ b/doc/requirements-doc.txt
@@ -3,4 +3,4 @@ pandas
itables
nbformat
nbclient
-plotly
\ No newline at end of file
+plotly
diff --git a/doc/ssm/ssm_mar.qmd b/doc/ssm/ssm_mar.qmd
index cac3864..a396fa4 100644
--- a/doc/ssm/ssm_mar.qmd
+++ b/doc/ssm/ssm_mar.qmd
@@ -9,55 +9,17 @@ jupyter: python3
import numpy as np
import pandas as pd
-from itables import init_notebook_mode, show, options
+from itables import init_notebook_mode
+import os
+import sys
-init_notebook_mode(all_interactive=True)
+doc_dir = os.path.abspath(os.path.join(os.getcwd(), ".."))
+if doc_dir not in sys.path:
+ sys.path.append(doc_dir)
+
+from utils.style_tables import generate_and_show_styled_table
-def highlight_range(s, level=0.95, dist=0.05, props=''):
- color_grid = np.where((s >= level-dist) &
- (s <= level+dist), props, '')
- return color_grid
-
-
-def color_coverage(df, level):
- # color coverage column order is important
- styled_df = df.apply(
- highlight_range,
- level=level,
- dist=1.0,
- props='color:black;background-color:red',
- subset=["Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.1,
- props='color:black;background-color:yellow',
- subset=["Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.05,
- props='color:white;background-color:darkgreen',
- subset=["Coverage"])
-
- # set all coverage values to bold
- styled_df = styled_df.set_properties(
- **{'font-weight': 'bold'},
- subset=["Coverage"])
- return styled_df
-
-
-def make_pretty(df, level, n_rep):
- styled_df = df.style.hide(axis="index")
- # Format only float columns
- float_cols = df.select_dtypes(include=['float']).columns
- styled_df = styled_df.format({col: "{:.3f}" for col in float_cols})
-
- # color coverage column order is important
- styled_df = color_coverage(styled_df, level)
- caption = f"Coverage for {level*100}%-Confidence Interval over {n_rep} Repetitions"
-
- return show(styled_df, caption=caption, allow_html=True)
+init_notebook_mode(all_interactive=True)
```
## ATE Coverage
@@ -68,7 +30,7 @@ The simulations are based on the [make_ssm_data](https://docs.doubleml.org/stabl
```{python}
#| echo: false
-metadata_file = '../../results/irm/ssm_mar_ate_coverage_metadata.csv'
+metadata_file = '../../results/ssm/ssm_mar_ate_metadata.csv'
metadata_df = pd.read_csv(metadata_file)
print(metadata_df.T.to_string(header=False))
```
@@ -80,7 +42,7 @@ print(metadata_df.T.to_string(header=False))
#| echo: false
# set up data and rename columns
-df = pd.read_csv("../../results/irm/ssm_mar_ate_coverage.csv", index_col=None)
+df = pd.read_csv("../../results/ssm/ssm_mar_ate_coverage.csv", index_col=None)
assert df["repetition"].nunique() == 1
n_rep = df["repetition"].unique()[0]
@@ -90,17 +52,24 @@ display_columns = ["Learner g", "Learner m", "Learner pi", "Bias", "CI Length",
```{python}
#| echo: false
-level = 0.95
-
-df_ate_95 = df[(df['level'] == level)][display_columns]
-make_pretty(df_ate_95, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df,
+ filters={"level": 0.95},
+ display_cols=display_columns,
+ n_rep=n_rep,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
```
```{python}
#| echo: false
-score = "partialling out"
-level = 0.9
-
-df_ate_9 = df[(df['level'] == level)][display_columns]
-make_pretty(df_ate_9, level, n_rep)
-```
\ No newline at end of file
+generate_and_show_styled_table(
+ main_df=df,
+ filters={"level": 0.9},
+ display_cols=display_columns,
+ n_rep=n_rep,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
+```
diff --git a/doc/ssm/ssm_nonignorable.qmd b/doc/ssm/ssm_nonignorable.qmd
index 3afb9b9..8eff76b 100644
--- a/doc/ssm/ssm_nonignorable.qmd
+++ b/doc/ssm/ssm_nonignorable.qmd
@@ -9,55 +9,17 @@ jupyter: python3
import numpy as np
import pandas as pd
-from itables import init_notebook_mode, show, options
+from itables import init_notebook_mode
+import os
+import sys
-init_notebook_mode(all_interactive=True)
+doc_dir = os.path.abspath(os.path.join(os.getcwd(), ".."))
+if doc_dir not in sys.path:
+ sys.path.append(doc_dir)
+
+from utils.style_tables import generate_and_show_styled_table
-def highlight_range(s, level=0.95, dist=0.05, props=''):
- color_grid = np.where((s >= level-dist) &
- (s <= level+dist), props, '')
- return color_grid
-
-
-def color_coverage(df, level):
- # color coverage column order is important
- styled_df = df.apply(
- highlight_range,
- level=level,
- dist=1.0,
- props='color:black;background-color:red',
- subset=["Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.1,
- props='color:black;background-color:yellow',
- subset=["Coverage"])
- styled_df = styled_df.apply(
- highlight_range,
- level=level,
- dist=0.05,
- props='color:white;background-color:darkgreen',
- subset=["Coverage"])
-
- # set all coverage values to bold
- styled_df = styled_df.set_properties(
- **{'font-weight': 'bold'},
- subset=["Coverage"])
- return styled_df
-
-
-def make_pretty(df, level, n_rep):
- styled_df = df.style.hide(axis="index")
- # Format only float columns
- float_cols = df.select_dtypes(include=['float']).columns
- styled_df = styled_df.format({col: "{:.3f}" for col in float_cols})
-
- # color coverage column order is important
- styled_df = color_coverage(styled_df, level)
- caption = f"Coverage for {level*100}%-Confidence Interval over {n_rep} Repetitions"
-
- return show(styled_df, caption=caption, allow_html=True)
+init_notebook_mode(all_interactive=True)
```
## ATE Coverage
@@ -69,7 +31,7 @@ The simulations are based on the [make_ssm_data](https://docs.doubleml.org/stabl
```{python}
#| echo: false
#| collapse: true
-metadata_file = '../../results/irm/ssm_nonignorable_ate_coverage_metadata.csv'
+metadata_file = '../../results/ssm/ssm_nonig_ate_metadata.csv'
metadata_df = pd.read_csv(metadata_file)
print(metadata_df.T.to_string(header=False))
```
@@ -80,7 +42,7 @@ print(metadata_df.T.to_string(header=False))
#| echo: false
# set up data and rename columns
-df = pd.read_csv("../../results/irm/ssm_nonignorable_ate_coverage.csv", index_col=None)
+df = pd.read_csv("../../results/ssm/ssm_nonig_ate_coverage.csv", index_col=None)
assert df["repetition"].nunique() == 1
n_rep = df["repetition"].unique()[0]
@@ -90,16 +52,24 @@ display_columns = ["Learner g", "Learner m", "Learner pi", "Bias", "CI Length",
```{python}
#| echo: false
-level = 0.95
-
-df_ate_95 = df[(df['level'] == level)][display_columns]
-make_pretty(df_ate_95, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df,
+ filters={"level": 0.95},
+ display_cols=display_columns,
+ n_rep=n_rep,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
```
```{python}
#| echo: false
-level = 0.9
-
-df_ate_9 = df[(df['level'] == level)][display_columns]
-make_pretty(df_ate_9, level, n_rep)
+generate_and_show_styled_table(
+ main_df=df,
+ filters={"level": 0.9},
+ display_cols=display_columns,
+ n_rep=n_rep,
+ level_col="level",
+ coverage_highlight_cols=["Coverage"]
+)
```
diff --git a/doc/styles.css b/doc/styles.css
index 2ddf50c..951e121 100644
--- a/doc/styles.css
+++ b/doc/styles.css
@@ -1 +1,146 @@
-/* css styles */
+/* Import Google Fonts */
+@import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&family=JetBrains+Mono:wght@400;500;600&display=swap');
+
+/* Root font variables */
+:root {
+ --font-family-sans: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, 'Helvetica Neue', Arial, sans-serif;
+ --font-family-mono: 'JetBrains Mono', 'SF Mono', Monaco, Inconsolata, 'Roboto Mono', 'Source Code Pro', monospace;
+}
+
+/* Base typography */
+body {
+ font-family: var(--font-family-sans);
+ font-weight: 400;
+ line-height: 1.6;
+ font-feature-settings: 'kern' 1, 'liga' 1, 'calt' 1;
+}
+
+/* Headings */
+h1,
+h2,
+h3,
+h4,
+h5,
+h6 {
+ font-family: var(--font-family-sans);
+ font-weight: 600;
+ line-height: 1.3;
+ letter-spacing: -0.025em;
+}
+
+h1 {
+ font-weight: 700;
+ font-size: 2.25rem;
+}
+
+h2 {
+ font-weight: 600;
+ font-size: 1.875rem;
+}
+
+h3 {
+ font-weight: 600;
+ font-size: 1.5rem;
+}
+
+h4 {
+ font-weight: 500;
+ font-size: 1.25rem;
+}
+
+/* Code and pre-formatted text */
+code,
+pre,
+.sourceCode {
+ font-family: var(--font-family-mono);
+ font-weight: 400;
+ font-feature-settings: 'liga' 1, 'calt' 1;
+}
+
+/* Inline code */
+code:not(pre code) {
+ font-size: 0.875em;
+ font-weight: 500;
+ padding: 0.125rem 0.25rem;
+ background-color: rgba(175, 184, 193, 0.2);
+ border-radius: 0.25rem;
+}
+
+/* Code blocks */
+pre {
+ font-size: 0.875rem;
+ line-height: 1.5;
+ padding: 1rem;
+ border-radius: 0.5rem;
+ overflow-x: auto;
+}
+
+/* Navigation and UI elements */
+.navbar-brand,
+.nav-link {
+ font-family: var(--font-family-sans);
+ font-weight: 500;
+}
+
+.sidebar .nav-link {
+ font-weight: 400;
+}
+
+.sidebar .nav-link.active {
+ font-weight: 500;
+}
+
+/* Tables */
+table {
+ font-family: var(--font-family-sans);
+ font-variant-numeric: tabular-nums;
+}
+
+th {
+ font-weight: 600;
+}
+
+/* Math equations - ensure good readability */
+.math {
+ font-family: 'STIX Two Math', 'Times New Roman', serif;
+}
+
+/* Buttons and interactive elements */
+.btn {
+ font-family: var(--font-family-sans);
+ font-weight: 500;
+ letter-spacing: 0.025em;
+}
+
+/* Improve readability for long text */
+.content {
+ max-width: none;
+}
+
+p {
+ margin-bottom: 1.25rem;
+}
+
+/* List styling */
+ul,
+ol {
+ margin-bottom: 1.25rem;
+}
+
+li {
+ margin-bottom: 0.5rem;
+}
+
+/* Better spacing for equations */
+.math.display {
+ margin: 1.5rem 0;
+}
+
+/* Blockquotes */
+blockquote {
+ font-style: italic;
+ border-left: 4px solid #e9ecef;
+ padding-left: 1rem;
+ margin-left: 0;
+ color: #6c757d;
+}
diff --git a/doc/utils/style_tables.py b/doc/utils/style_tables.py
new file mode 100644
index 0000000..f932fe9
--- /dev/null
+++ b/doc/utils/style_tables.py
@@ -0,0 +1,329 @@
+import numpy as np
+import pandas as pd
+from pandas.io.formats.style import Styler
+from typing import Union, Optional, List, Any
+from itables import show
+from .styling import (
+ TABLE_STYLING,
+ COVERAGE_THRESHOLDS,
+ get_coverage_tier_css_props,
+)
+
+
+# Define highlighting tiers using centralized color configuration
+HIGHLIGHT_TIERS = [
+ {"dist": COVERAGE_THRESHOLDS["poor"], "props": get_coverage_tier_css_props("poor")},
+ {
+ "dist": COVERAGE_THRESHOLDS["medium"],
+ "props": get_coverage_tier_css_props("medium", "500"),
+ },
+ {"dist": COVERAGE_THRESHOLDS["good"], "props": get_coverage_tier_css_props("good")},
+]
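+# With the theme.yml defaults, a coverage value within 0.05 of the nominal
+# level is styled "good" (green), within 0.1 "medium" (amber), and anything
+# further away "poor" (coral).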
+
+
+def _apply_highlight_range(
+ s_col: pd.Series, level: float, dist: float, props: str
+) -> np.ndarray:
+ """
+ Helper function for Styler.apply. Applies CSS properties based on a numeric range.
+ Returns an array of CSS strings.
+ """
+ s_numeric = pd.to_numeric(
+ s_col, errors="coerce"
+ ) # Convert to numeric, non-convertibles become NaN
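+    # Non-numeric entries coerce to NaN and fail the range check below,
+    # so they are left unstyled.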
+
+ # Apply style ONLY if value is WITHIN the current dist from level
+ # Use absolute difference to determine which tier applies
+ abs_diff = np.abs(s_numeric - level)
+ condition = abs_diff <= dist
+ return np.where(condition, props, "")
+
+
+def _determine_coverage_tier(value: float, level: float) -> str:
+ """
+ Determine which coverage tier a value belongs to based on distance from level.
+ Returns the most specific (smallest distance) tier that applies.
+ """
+ if pd.isna(value):
+ return ""
+
+ abs_diff = abs(value - level)
+
+ # Check tiers from most specific to least specific
+ sorted_tiers = sorted(HIGHLIGHT_TIERS, key=lambda x: x["dist"])
+
+ for tier in sorted_tiers:
+ if abs_diff <= tier["dist"]:
+ return tier["props"]
+
+ return ""
+
+
+def _apply_base_table_styling(styler: Styler) -> Styler:
+ """
+ Apply base styling to the table including headers, borders, and overall appearance.
+ """
+ # Define CSS styles for clean table appearance using centralized colors
+ styles = [
+ # Table-wide styling
+ {
+ "selector": "table",
+ "props": [
+ ("border-collapse", "separate"),
+ ("border-spacing", "0"),
+ ("width", "100%"),
+ (
+ "font-family",
+ '"Segoe UI", -apple-system, BlinkMacSystemFont, "Roboto", sans-serif',
+ ),
+ ("font-size", "14px"),
+ ("line-height", "1.5"),
+ ("box-shadow", "0 2px 8px rgba(0,0,0,0.1)"),
+ ("border-radius", "8px"),
+ ("overflow", "hidden"),
+ ],
+ },
+ # Header styling
+ {
+ "selector": "thead th",
+ "props": [
+ ("background-color", TABLE_STYLING["header_bg"]),
+ ("color", TABLE_STYLING["header_text"]),
+ ("font-weight", "600"),
+ ("text-align", "center"),
+ ("padding", "12px 16px"),
+ ("border-bottom", f'2px solid {TABLE_STYLING["border"]}'),
+ ("position", "sticky"),
+ ("top", "0"),
+ ("z-index", "10"),
+ ],
+ },
+ # Cell styling
+ {
+ "selector": "tbody td",
+ "props": [
+ ("padding", "10px 16px"),
+ ("text-align", "center"),
+ ("border-bottom", f'1px solid {TABLE_STYLING["border"]}'),
+ ("transition", "background-color 0.2s ease"),
+ ],
+ },
+ # Row hover effect
+ {
+ "selector": "tbody tr:hover td",
+ "props": [("background-color", TABLE_STYLING["hover_bg"])],
+ },
+ # Caption styling
+ {
+ "selector": "caption",
+ "props": [
+ ("color", TABLE_STYLING["caption_color"]),
+ ("font-size", "16px"),
+ ("font-weight", "600"),
+ ("margin-bottom", "16px"),
+ ("text-align", "left"),
+ ("caption-side", "top"),
+ ],
+ },
+ ]
+
+ return styler.set_table_styles(styles)
+
+
+def color_coverage_columns(
+ styler: Styler, level: float, coverage_cols: list[str] = ["Coverage"]
+) -> Styler:
+ """
+ Applies tiered highlighting to specified coverage columns of a Styler object.
+ Uses non-overlapping logic to prevent CSS conflicts.
+ """
+ if not isinstance(styler, Styler):
+ raise TypeError("Expected a pandas Styler object.")
+
+ # Ensure coverage_cols is a list
+ if isinstance(coverage_cols, str):
+ coverage_cols = [coverage_cols]
+
+ # Filter for columns that actually exist in the DataFrame being styled
+ valid_coverage_cols = [col for col in coverage_cols if col in styler.data.columns]
+
+ if not valid_coverage_cols:
+ return styler # No valid columns to style
+
+ # Apply base styling first
+ current_styler = _apply_base_table_styling(styler)
+
+ # Apply single tier styling to prevent conflicts
+ def apply_coverage_tier_to_cell(s_col):
+ """Apply only the most appropriate coverage tier for each cell."""
+ return s_col.apply(lambda x: _determine_coverage_tier(x, level))
+
+ current_styler = current_styler.apply(
+ apply_coverage_tier_to_cell, subset=valid_coverage_cols
+ )
+
+ # Apply additional styling to coverage columns for emphasis
+ current_styler = current_styler.set_properties(
+ **{
+ "text-align": "center",
+ "font-family": "monospace",
+ "font-size": "13px",
+ },
+ subset=valid_coverage_cols,
+ )
+
+ return current_styler
+
+
+def create_styled_table(
+ df: pd.DataFrame,
+ level: float,
+ n_rep: Union[int, str],
+ caption_prefix: str = "Coverage",
+ coverage_cols: List[str] = ["Coverage"],
+ float_precision: str = "{:.3f}",
+) -> Styler:
+ """
+ Creates a styled pandas DataFrame (Styler object) for display.
+ - Hides the DataFrame index.
+ - Formats float columns to a specified precision.
+ - Applies conditional highlighting to coverage columns.
+ - Sets a descriptive caption.
+ """
+ if not isinstance(df, pd.DataFrame):
+ return pd.DataFrame({"Error": ["Input is not a DataFrame."]}).style.hide(
+ axis="index"
+ )
+
+ if df.empty:
+ empty_df_cols = df.columns if df.columns.tolist() else ["Info"]
+ message_val = (
+ ["No data to display."]
+ if not df.columns.tolist()
+ else [None] * len(empty_df_cols)
+ )
+ df_to_style = pd.DataFrame(
+ (
+ dict(zip(empty_df_cols, [[v] for v in message_val]))
+ if not df.columns.tolist()
+ else {} # Pass empty dict for empty DataFrame with columns
+ ),
+ columns=empty_df_cols,
+ )
+ return df_to_style.style.hide(axis="index").set_caption("No data to display.")
+
+ # Prepare float formatting dictionary
+ float_cols = df.select_dtypes(include=["float", "float64", "float32"]).columns
+ format_dict = {col: float_precision for col in float_cols if col in df.columns}
+
+ # Create and set the caption text
+ level_percent = level * 100
+ if abs(level_percent - round(level_percent)) < 1e-9:
+ level_display = f"{int(round(level_percent))}"
+ else:
+ level_display = f"{level_percent:.1f}"
+
+ n_rep_display = str(n_rep) # Ensure n_rep is a string for the caption
+
+ caption_text = f"{caption_prefix} for {level_display}%-Confidence Interval over {n_rep_display} Repetitions"
+
+ # Chain Styler methods
+ styled_df = (
+ df.style.hide(axis="index")
+ .format(
+ format_dict if format_dict else None
+ ) # Pass None if no float cols to format
+ .pipe(color_coverage_columns, level=level, coverage_cols=coverage_cols)
+ .set_caption(caption_text)
+ )
+
+ return styled_df
+
+
+def generate_and_show_styled_table(
+ main_df: pd.DataFrame,
+ filters: dict[str, Any],
+ display_cols: List[str],
+ n_rep: Union[int, str],
+ level_col: str = "level",
+ rename_map: Optional[dict[str, str]] = None,
+ caption_prefix: str = "Coverage",
+ coverage_highlight_cols: List[str] = ["Coverage"],
+ float_precision: str = "{:.3f}",
+):
+ """
+ Filters a DataFrame based on a dictionary of conditions,
+ creates a styled table, and displays it.
+ """
+ if main_df.empty:
+ print("Warning: Input DataFrame is empty.")
+ # Optionally, show an empty table or a message
+ empty_styled_df = (
+ pd.DataFrame(columns=display_cols)
+ .style.hide(axis="index")
+ .set_caption("No data available (input empty).")
+ )
+ show(empty_styled_df, allow_html=True)
+ return
+
+    # Build filter condition
+    current_df = main_df
+    filter_description_parts = []
+
+    for col, value in filters.items():
+        if col not in current_df.columns:
+            print(
+                f"Warning: Filter column '{col}' not found in DataFrame. Skipping this filter."
+            )
+            continue
+        current_df = current_df[current_df[col] == value]
+        filter_description_parts.append(f"{col}='{value}'")
+
+ filter_description = " & ".join(filter_description_parts)
+
+ if current_df.empty:
+ level_val = filters.get(level_col, "N/A")
+ level_percent_display = (
+ f"{level_val*100}%" if isinstance(level_val, (int, float)) else level_val
+ )
+ caption_msg = f"No data after filtering for {filter_description} at {level_percent_display} level."
+ print(f"Warning: {caption_msg}")
+ empty_styled_df = (
+ pd.DataFrame(columns=display_cols)
+ .style.hide(axis="index")
+ .set_caption(caption_msg)
+ )
+ show(empty_styled_df, allow_html=True)
+ return
+
+ df_filtered = current_df[
+ display_cols
+ ].copy() # Select display columns after filtering
+
+ if rename_map:
+ df_filtered.rename(columns=rename_map, inplace=True)
+
+ # Determine the level for styling from the filters, if present
+ styling_level = filters.get(level_col)
+ if styling_level is None or not isinstance(styling_level, (float, int)):
+ print(
+ f"Warning: '{level_col}' not found in filters or is not numeric. Cannot determine styling level for highlighting."
+ )
+        # Fall back: infer the level when the filtered data has a single
+        # unique value; otherwise default to the common 95% nominal level.
+        if level_col in df_filtered.columns and df_filtered[level_col].nunique() == 1:
+            styling_level = df_filtered[level_col].iloc[0]
+        else:
+            styling_level = 0.95
+
+ styled_table = create_styled_table(
+ df_filtered,
+ styling_level, # Use the level from filters for styling
+ n_rep,
+ caption_prefix=caption_prefix,
+ coverage_cols=coverage_highlight_cols,
+ float_precision=float_precision,
+ )
+ show(styled_table, allow_html=True)
diff --git a/doc/utils/styling.py b/doc/utils/styling.py
new file mode 100644
index 0000000..0362df6
--- /dev/null
+++ b/doc/utils/styling.py
@@ -0,0 +1,91 @@
+"""
+Styling utilities for DoubleML Coverage tables and documentation.
+
+This module provides helper functions for applying consistent styling
+based on the centralized theme configuration.
+"""
+
+import yaml
+from pathlib import Path
+from typing import Dict, Any
+import copy
+
+
+def _load_theme_config() -> Dict[str, Any]:
+ """Load theme configuration from YAML file."""
+ config_path = Path(__file__).parent / "theme.yml"
+ with open(config_path, "r") as f:
+ return yaml.safe_load(f)
+
+
+# Load configuration once at module import
+_THEME = _load_theme_config()
+
+# Expose configuration for backward compatibility and direct access
+COVERAGE_COLORS = _THEME["coverage_colors"]
+TABLE_STYLING = _THEME["table_styling"]
+COVERAGE_THRESHOLDS = _THEME["coverage_thresholds"]
+
+
+def get_coverage_tier_css_props(tier: str, font_weight: str = "600") -> str:
+ """
+ Generate CSS properties string for a coverage performance tier.
+
+ Args:
+ tier: One of 'good', 'medium', 'poor'
+ font_weight: CSS font-weight value
+
+ Returns:
+ CSS properties string for use with pandas Styler
+ """
+ if tier not in COVERAGE_COLORS:
+ raise ValueError(
+ f"Unknown tier '{tier}'. Must be one of: {list(COVERAGE_COLORS.keys())}"
+ )
+
+ colors = COVERAGE_COLORS[tier]
+ return (
+ f"color:{colors['text']};"
+ f"background-color:{colors['background']};"
+ f"border-left:4px solid {colors['border']};"
+ f"font-weight:{font_weight};"
+ )
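+# For example, with the default theme.yml palette,
+# get_coverage_tier_css_props("good") yields:
+#   "color:#0f5132;background-color:#d1e7dd;border-left:4px solid #198754;font-weight:600;"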
+
+
+def get_coverage_tier_html_span(tier: str, text: str = None) -> str:
+ """
+ Generate HTML span element with coverage tier styling for documentation.
+
+ Args:
+ tier: One of 'good', 'medium', 'poor'
+ text: Text to display (defaults to tier description)
+
+ Returns:
+ HTML span element with inline styling
+ """
+ if tier not in COVERAGE_COLORS:
+ raise ValueError(
+ f"Unknown tier '{tier}'. Must be one of: {list(COVERAGE_COLORS.keys())}"
+ )
+
+ colors = COVERAGE_COLORS[tier]
+ display_text = text or colors["description"]
+
+ return (
+ f''
+ f"{display_text}"
+ )
+
+
+def get_theme_config() -> Dict[str, Any]:
+ """
+ Get the complete theme configuration.
+
+ Returns:
+ Dictionary containing all theme settings
+ """
+ return copy.deepcopy(_THEME)
diff --git a/doc/utils/theme.yml b/doc/utils/theme.yml
new file mode 100644
index 0000000..3e98bed
--- /dev/null
+++ b/doc/utils/theme.yml
@@ -0,0 +1,31 @@
+# DoubleML Coverage Theme Configuration
+# Central color palette and styling settings
+
+coverage_colors:
+ good:
+ background: "#d1e7dd"
+ text: "#0f5132"
+ border: "#198754"
+ description: "Green"
+ medium:
+ background: "#fff3cd"
+ text: "#856404"
+ border: "#ffc107"
+ description: "Amber"
+ poor:
+ background: "#f8d7da"
+ text: "#721c24"
+ border: "#dc3545"
+ description: "Coral"
+
+table_styling:
+ header_bg: "#f8f9fa"
+ header_text: "#495057"
+ border: "#dee2e6"
+ caption_color: "#6c757d"
+ hover_bg: "#f5f5f5"
+
+coverage_thresholds:
+ good: 0.05 # Within 5% of nominal level
+ medium: 0.1 # Within 10% of nominal level
+ poor: 1.0 # Beyond 10% of nominal level
diff --git a/monte-cover/pyproject.toml b/monte-cover/pyproject.toml
index 357e36c..547bf24 100644
--- a/monte-cover/pyproject.toml
+++ b/monte-cover/pyproject.toml
@@ -9,7 +9,7 @@ authors = [
requires-python = ">=3.12"
dependencies = [
"black>=25.1.0",
- "doubleml[rdd]>=0.9.3",
+ "doubleml[rdd]>=0.10.0",
"ipykernel>=6.29.5",
"itables>=2.2.5",
"joblib>=1.4.2",
@@ -19,6 +19,7 @@ dependencies = [
"pyyaml>=6.0.2",
"ruff>=0.11.0",
"scikit-learn>=1.5.2",
+ "pre-commit>=4.2.0",
]
[project.scripts]
diff --git a/monte-cover/src/montecover/base.py b/monte-cover/src/montecover/base.py
index 957f0fe..1695e2e 100644
--- a/monte-cover/src/montecover/base.py
+++ b/monte-cover/src/montecover/base.py
@@ -161,7 +161,7 @@ def save_config(self, output_path: str):
self.logger.warning(f"Adding .yaml extension to output path: {output_path}")
with open(output_path, "w") as file:
- yaml.dump(self.config, file)
+ yaml.dump(self.config, file, sort_keys=False, default_flow_style=False, indent=2, allow_unicode=True)
self.logger.info(f"Configuration saved to {output_path}")
@@ -333,9 +333,9 @@ def _compute_coverage(thetas, oracle_thetas, confint, joint_confint=None):
if joint_confint is not None:
joint_lower_bound = joint_confint.iloc[:, 0]
joint_upper_bound = joint_confint.iloc[:, 1]
- joint_coverage_mark = (joint_lower_bound < oracle_thetas) & (oracle_thetas < joint_upper_bound)
+ joint_coverage_mask = (joint_lower_bound < oracle_thetas) & (oracle_thetas < joint_upper_bound)
- result_dict["Uniform Coverage"] = np.all(joint_coverage_mark)
+ result_dict["Uniform Coverage"] = np.all(joint_coverage_mask)
result_dict["Uniform CI Length"] = np.mean(joint_upper_bound - joint_lower_bound)
return result_dict
diff --git a/monte-cover/src/montecover/did/__init__.py b/monte-cover/src/montecover/did/__init__.py
index 5aecac3..e14a6dd 100644
--- a/monte-cover/src/montecover/did/__init__.py
+++ b/monte-cover/src/montecover/did/__init__.py
@@ -1,5 +1,6 @@
"""Monte Carlo coverage simulations for DiD."""
+from montecover.did.did_cs_multi import DIDCSMultiCoverageSimulation
from montecover.did.did_pa_multi import DIDMultiCoverageSimulation
-__all__ = ["DIDMultiCoverageSimulation"]
+__all__ = ["DIDMultiCoverageSimulation", "DIDCSMultiCoverageSimulation"]
diff --git a/monte-cover/src/montecover/did/did_cs_multi.py b/monte-cover/src/montecover/did/did_cs_multi.py
new file mode 100644
index 0000000..ea11cd2
--- /dev/null
+++ b/monte-cover/src/montecover/did/did_cs_multi.py
@@ -0,0 +1,182 @@
+from typing import Any, Dict, Optional
+
+import doubleml as dml
+import numpy as np
+import pandas as pd
+from doubleml.did.datasets import make_did_cs_CS2021
+
+from montecover.base import BaseSimulation
+from montecover.utils import create_learner_from_config
+
+
+class DIDCSMultiCoverageSimulation(BaseSimulation):
+ """Simulation study for coverage properties of DoubleMLDIDMulti."""
+
+ def __init__(
+ self,
+ config_file: str,
+ suppress_warnings: bool = True,
+ log_level: str = "INFO",
+ log_file: Optional[str] = None,
+ ):
+ super().__init__(
+ config_file=config_file,
+ suppress_warnings=suppress_warnings,
+ log_level=log_level,
+ log_file=log_file,
+ )
+
+ # Additional results storage for aggregated results
+ self.results_aggregated = []
+
+ # Calculate oracle values
+ self._calculate_oracle_values()
+
+ def _process_config_parameters(self):
+ """Process simulation-specific parameters from config"""
+ # Process ML models in parameter grid
+ assert "learners" in self.dml_parameters, "No learners specified in the config file"
+
+ required_learners = ["ml_g", "ml_m"]
+ for learner in self.dml_parameters["learners"]:
+ for ml in required_learners:
+ assert ml in learner, f"No {ml} specified in the config file"
+
+ def _calculate_oracle_values(self):
+ """Calculate oracle values for the simulation."""
+ self.logger.info("Calculating oracle values")
+
+ self.oracle_values = dict()
+ # Oracle values
+ df_oracle = make_did_cs_CS2021(
+ n_obs=int(1e6),
+ dgp_type=1,
+ lambda_t=self.dgp_parameters["lambda_t"][0],
+        )  # the oracle ITEs do not depend on the DGP type or lambda_t
+ df_oracle["ite"] = df_oracle["y1"] - df_oracle["y0"]
+ self.oracle_values["detailed"] = df_oracle.groupby(["d", "t"])["ite"].mean().reset_index()
+
+ # Oracle group aggregation
+ df_oracle_post_treatment = df_oracle[df_oracle["t"] >= df_oracle["d"]]
+ self.oracle_values["group"] = df_oracle_post_treatment.groupby("d")["ite"].mean()
+
+ # Oracle time aggregation
+ self.oracle_values["time"] = df_oracle_post_treatment.groupby("t")["ite"].mean()
+
+ # Oracle eventstudy aggregation
+ df_oracle["e"] = pd.to_datetime(df_oracle["t"]).values.astype("datetime64[M]") - pd.to_datetime(
+ df_oracle["d"]
+ ).values.astype("datetime64[M]")
+ self.oracle_values["eventstudy"] = df_oracle.groupby("e")["ite"].mean()[1:]
+
+ def run_single_rep(self, dml_data, dml_params) -> Dict[str, Any]:
+ """Run a single repetition with the given parameters."""
+ # Extract parameters
+ learner_config = dml_params["learners"]
+ learner_g_name, ml_g = create_learner_from_config(learner_config["ml_g"])
+ learner_m_name, ml_m = create_learner_from_config(learner_config["ml_m"])
+ score = dml_params["score"]
+ in_sample_normalization = dml_params["in_sample_normalization"]
+
+ # Model
+ dml_model = dml.did.DoubleMLDIDMulti(
+ obj_dml_data=dml_data,
+ ml_g=ml_g,
+ ml_m=None if score == "experimental" else ml_m,
+ gt_combinations="standard",
+ score=score,
+ panel=False,
+ in_sample_normalization=in_sample_normalization,
+ )
+ dml_model.fit()
+ dml_model.bootstrap(n_rep_boot=2000)
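+        # bootstrap() supplies the critical values needed for the joint
+        # confidence intervals requested below (confint(..., joint=True)).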
+
+ # Oracle values for this model
+ oracle_thetas = np.full_like(dml_model.coef, np.nan)
+ for i, (g, _, t) in enumerate(dml_model.gt_combinations):
+ group_index = self.oracle_values["detailed"]["d"] == g
+ time_index = self.oracle_values["detailed"]["t"] == t
+ oracle_thetas[i] = self.oracle_values["detailed"][group_index & time_index]["ite"].iloc[0]
+
+ result = {
+ "detailed": [],
+ "group": [],
+ "time": [],
+ "eventstudy": [],
+ }
+ for level in self.confidence_parameters["level"]:
+ level_result = dict()
+ level_result["detailed"] = self._compute_coverage(
+ thetas=dml_model.coef,
+ oracle_thetas=oracle_thetas,
+ confint=dml_model.confint(level=level),
+ joint_confint=dml_model.confint(level=level, joint=True),
+ )
+
+ for aggregation_method in ["group", "time", "eventstudy"]:
+ agg_obj = dml_model.aggregate(aggregation=aggregation_method)
+ agg_obj.aggregated_frameworks.bootstrap(n_rep_boot=2000)
+
+ level_result[aggregation_method] = self._compute_coverage(
+ thetas=agg_obj.aggregated_frameworks.thetas,
+ oracle_thetas=self.oracle_values[aggregation_method].values,
+ confint=agg_obj.aggregated_frameworks.confint(level=level),
+ joint_confint=agg_obj.aggregated_frameworks.confint(level=level, joint=True),
+ )
+
+ # add parameters to the result
+ for res in level_result.values():
+ res.update(
+ {
+ "Learner g": learner_g_name,
+ "Learner m": learner_m_name,
+ "Score": score,
+ "In-sample-norm.": in_sample_normalization,
+ "level": level,
+ }
+ )
+ for key, res in level_result.items():
+ result[key].append(res)
+
+ return result
+
+ def summarize_results(self):
+ """Summarize the simulation results."""
+ self.logger.info("Summarizing simulation results")
+
+ groupby_cols = [
+ "Learner g",
+ "Learner m",
+ "Score",
+ "In-sample-norm.",
+ "DGP",
+ "level",
+ ]
+ aggregation_dict = {
+ "Coverage": "mean",
+ "CI Length": "mean",
+ "Bias": "mean",
+ "Uniform Coverage": "mean",
+ "Uniform CI Length": "mean",
+ "repetition": "count",
+ }
+
+ result_summary = dict()
+ for result_name, result_df in self.results.items():
+ result_summary[result_name] = result_df.groupby(groupby_cols).agg(aggregation_dict).reset_index()
+ self.logger.debug(f"Summarized {result_name} results")
+
+ return result_summary
+
+ def _generate_dml_data(self, dgp_params) -> dml.data.DoubleMLPanelData:
+ """Generate data for the simulation."""
+ data = make_did_cs_CS2021(n_obs=dgp_params["n_obs"], dgp_type=dgp_params["DGP"], lambda_t=dgp_params["lambda_t"])
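+        # make_did_cs_CS2021 provides the covariates Z1-Z4 referenced in x_cols below.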
+ dml_data = dml.data.DoubleMLPanelData(
+ data,
+ y_col="y",
+ d_cols="d",
+ id_col="id",
+ t_col="t",
+ x_cols=["Z1", "Z2", "Z3", "Z4"],
+ )
+ return dml_data
diff --git a/monte-cover/src/montecover/did/did_pa_multi.py b/monte-cover/src/montecover/did/did_pa_multi.py
index 2c4e601..eb84934 100644
--- a/monte-cover/src/montecover/did/did_pa_multi.py
+++ b/monte-cover/src/montecover/did/did_pa_multi.py
@@ -4,10 +4,9 @@
import numpy as np
import pandas as pd
from doubleml.did.datasets import make_did_CS2021
-from lightgbm import LGBMClassifier, LGBMRegressor
-from sklearn.linear_model import LinearRegression, LogisticRegression
from montecover.base import BaseSimulation
+from montecover.utils import create_learner_from_config
class DIDMultiCoverageSimulation(BaseSimulation):
@@ -36,27 +35,13 @@ def __init__(
def _process_config_parameters(self):
"""Process simulation-specific parameters from config"""
# Process ML models in parameter grid
-
assert "learners" in self.dml_parameters, "No learners specified in the config file"
+
+ required_learners = ["ml_g", "ml_m"]
for learner in self.dml_parameters["learners"]:
- assert "ml_g" in learner, "No ml_g specified in the config file"
- assert "ml_m" in learner, "No ml_m specified in the config file"
-
- # Convert ml_g strings to actual objects
- if learner["ml_g"][0] == "Linear":
- learner["ml_g"] = ("Linear", LinearRegression())
- elif learner["ml_g"][0] == "LGBM":
- learner["ml_g"] = ("LGBM", LGBMRegressor(n_estimators=500, learning_rate=0.02, verbose=-1, n_jobs=1))
- else:
- raise ValueError(f"Unknown learner type: {learner['ml_g']}")
-
- # Convert ml_m strings to actual objects
- if learner["ml_m"][0] == "Linear":
- learner["ml_m"] = ("Linear", LogisticRegression())
- elif learner["ml_m"][0] == "LGBM":
- learner["ml_m"] = ("LGBM", LGBMClassifier(n_estimators=500, learning_rate=0.02, verbose=-1, n_jobs=1))
- else:
- raise ValueError(f"Unknown learner type: {learner['ml_m']}")
+ for ml in required_learners:
+ assert ml in learner, f"No {ml} specified in the config file"
def _calculate_oracle_values(self):
"""Calculate oracle values for the simulation."""
@@ -64,28 +49,35 @@ def _calculate_oracle_values(self):
self.oracle_values = dict()
# Oracle values
- df_oracle = make_did_CS2021(n_obs=int(1e6), dgp_type=1) # does not depend on the DGP type
+ df_oracle = make_did_CS2021(
+ n_obs=int(1e6), dgp_type=1
+ ) # does not depend on the DGP type
df_oracle["ite"] = df_oracle["y1"] - df_oracle["y0"]
- self.oracle_values["detailed"] = df_oracle.groupby(["d", "t"])["ite"].mean().reset_index()
+ self.oracle_values["detailed"] = (
+ df_oracle.groupby(["d", "t"])["ite"].mean().reset_index()
+ )
# Oracle group aggregation
df_oracle_post_treatment = df_oracle[df_oracle["t"] >= df_oracle["d"]]
- self.oracle_values["group"] = df_oracle_post_treatment.groupby("d")["ite"].mean()
+ self.oracle_values["group"] = df_oracle_post_treatment.groupby("d")[
+ "ite"
+ ].mean()
# Oracle time aggregation
self.oracle_values["time"] = df_oracle_post_treatment.groupby("t")["ite"].mean()
# Oracle eventstudy aggregation
- df_oracle["e"] = pd.to_datetime(df_oracle["t"]).values.astype("datetime64[M]") - pd.to_datetime(
- df_oracle["d"]
- ).values.astype("datetime64[M]")
+ df_oracle["e"] = pd.to_datetime(df_oracle["t"]).values.astype(
+ "datetime64[M]"
+ ) - pd.to_datetime(df_oracle["d"]).values.astype("datetime64[M]")
self.oracle_values["eventstudy"] = df_oracle.groupby("e")["ite"].mean()[1:]
def run_single_rep(self, dml_data, dml_params) -> Dict[str, Any]:
"""Run a single repetition with the given parameters."""
# Extract parameters
- learner_g_name, ml_g = dml_params["learners"]["ml_g"]
- learner_m_name, ml_m = dml_params["learners"]["ml_m"]
+ learner_config = dml_params["learners"]
+ learner_g_name, ml_g = create_learner_from_config(learner_config["ml_g"])
+ learner_m_name, ml_m = create_learner_from_config(learner_config["ml_m"])
score = dml_params["score"]
in_sample_normalization = dml_params["in_sample_normalization"]
@@ -106,7 +98,9 @@ def run_single_rep(self, dml_data, dml_params) -> Dict[str, Any]:
for i, (g, _, t) in enumerate(dml_model.gt_combinations):
group_index = self.oracle_values["detailed"]["d"] == g
time_index = self.oracle_values["detailed"]["t"] == t
- oracle_thetas[i] = self.oracle_values["detailed"][group_index & time_index]["ite"].iloc[0]
+ oracle_thetas[i] = self.oracle_values["detailed"][group_index & time_index][
+ "ite"
+ ].iloc[0]
result = {
"detailed": [],
@@ -131,7 +125,9 @@ def run_single_rep(self, dml_data, dml_params) -> Dict[str, Any]:
thetas=agg_obj.aggregated_frameworks.thetas,
oracle_thetas=self.oracle_values[aggregation_method].values,
confint=agg_obj.aggregated_frameworks.confint(level=level),
- joint_confint=agg_obj.aggregated_frameworks.confint(level=level, joint=True),
+ joint_confint=agg_obj.aggregated_frameworks.confint(
+ level=level, joint=True
+ ),
)
# add parameters to the result
@@ -154,7 +150,14 @@ def summarize_results(self):
"""Summarize the simulation results."""
self.logger.info("Summarizing simulation results")
- groupby_cols = ["Learner g", "Learner m", "Score", "In-sample-norm.", "DGP", "level"]
+ groupby_cols = [
+ "Learner g",
+ "Learner m",
+ "Score",
+ "In-sample-norm.",
+ "DGP",
+ "level",
+ ]
aggregation_dict = {
"Coverage": "mean",
"CI Length": "mean",
@@ -166,7 +169,9 @@ def summarize_results(self):
result_summary = dict()
for result_name, result_df in self.results.items():
- result_summary[result_name] = result_df.groupby(groupby_cols).agg(aggregation_dict).reset_index()
+ result_summary[result_name] = (
+ result_df.groupby(groupby_cols).agg(aggregation_dict).reset_index()
+ )
self.logger.debug(f"Summarized {result_name} results")
return result_summary
diff --git a/monte-cover/src/montecover/irm/__init__.py b/monte-cover/src/montecover/irm/__init__.py
new file mode 100644
index 0000000..6c09726
--- /dev/null
+++ b/monte-cover/src/montecover/irm/__init__.py
@@ -0,0 +1,29 @@
+"""Monte Carlo coverage simulations for IRM."""
+
+from montecover.irm.apo import APOCoverageSimulation
+from montecover.irm.apos import APOSCoverageSimulation
+from montecover.irm.cvar import CVARCoverageSimulation
+from montecover.irm.iivm_late import IIVMLATECoverageSimulation
+from montecover.irm.irm_ate import IRMATECoverageSimulation
+from montecover.irm.irm_ate_sensitivity import IRMATESensitivityCoverageSimulation
+from montecover.irm.irm_atte import IRMATTECoverageSimulation
+from montecover.irm.irm_atte_sensitivity import IRMATTESensitivityCoverageSimulation
+from montecover.irm.irm_cate import IRMCATECoverageSimulation
+from montecover.irm.irm_gate import IRMGATECoverageSimulation
+from montecover.irm.lpq import LPQCoverageSimulation
+from montecover.irm.pq import PQCoverageSimulation
+
+__all__ = [
+ "APOCoverageSimulation",
+ "APOSCoverageSimulation",
+ "CVARCoverageSimulation",
+ "IRMATECoverageSimulation",
+ "IIVMLATECoverageSimulation",
+ "IRMATESensitivityCoverageSimulation",
+ "IRMATTECoverageSimulation",
+ "IRMATTESensitivityCoverageSimulation",
+ "IRMCATECoverageSimulation",
+ "IRMGATECoverageSimulation",
+ "LPQCoverageSimulation",
+ "PQCoverageSimulation",
+]
diff --git a/monte-cover/src/montecover/irm/apo.py b/monte-cover/src/montecover/irm/apo.py
new file mode 100644
index 0000000..b887b7d
--- /dev/null
+++ b/monte-cover/src/montecover/irm/apo.py
@@ -0,0 +1,152 @@
+from typing import Any, Dict, Optional
+
+import doubleml as dml
+import numpy as np
+import pandas as pd
+from doubleml.datasets import make_irm_data_discrete_treatments
+
+from montecover.base import BaseSimulation
+from montecover.utils import create_learner_from_config
+
+
+class APOCoverageSimulation(BaseSimulation):
+ """Simulation class for coverage properties of DoubleMLAPOs for APO estimation."""
+
+ def __init__(
+ self,
+ config_file: str,
+ suppress_warnings: bool = True,
+ log_level: str = "INFO",
+ log_file: Optional[str] = None,
+ ):
+ super().__init__(
+ config_file=config_file,
+ suppress_warnings=suppress_warnings,
+ log_level=log_level,
+ log_file=log_file,
+ )
+
+ # Calculate oracle values
+ self._calculate_oracle_values()
+
+ def _process_config_parameters(self):
+ """Process simulation-specific parameters from config"""
+ # Process ML models in parameter grid
+ assert "learners" in self.dml_parameters, "No learners specified in the config file"
+
+ required_learners = ["ml_g", "ml_m"]
+ for learner in self.dml_parameters["learners"]:
+ for ml in required_learners:
+ assert ml in learner, f"No {ml} specified in the config file"
+
+ def _calculate_oracle_values(self):
+ """Calculate oracle values for the simulation."""
+ self.logger.info("Calculating oracle values")
+
+ n_levels = self.dgp_parameters["n_levels"][0]
+ data_apo_oracle = make_irm_data_discrete_treatments(
+ n_obs=int(1e6), n_levels=n_levels, linear=self.dgp_parameters["linear"][0]
+ )
+
+ y0 = data_apo_oracle["oracle_values"]["y0"]
+ ite = data_apo_oracle["oracle_values"]["ite"]
+ d = data_apo_oracle["d"]
+
+ average_ites = np.full(n_levels + 1, np.nan)
+ apos = np.full(n_levels + 1, np.nan)
+ for i in range(n_levels + 1):
+ average_ites[i] = np.mean(ite[d == i]) * (i > 0)
+ apos[i] = np.mean(y0) + average_ites[i]
+
+ ates = np.full(n_levels, np.nan)
+ for i in range(n_levels):
+ ates[i] = apos[i + 1] - apos[0]
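+        # Oracle identities used above: level 0 is the control level, so its ITE
+        # contribution is zeroed out and APO(0) = E[y0]; for k > 0 the APO adds the
+        # mean ITE among units at that level, APO(k) = E[y0] + E[ite | d == k], and
+        # the contrasts to control give ATE(k) = APO(k) - APO(0).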
+
+ self.logger.info(f"Levels and their counts:\n{np.unique(d, return_counts=True)}")
+ self.logger.info(f"True APOs: {apos}")
+ self.logger.info(f"True ATEs: {ates}")
+
+ self.oracle_values = dict()
+ self.oracle_values["apos"] = apos
+ self.oracle_values["ates"] = ates
+
+ def run_single_rep(self, dml_data: dml.DoubleMLData, dml_params: Dict[str, Any]) -> Dict[str, Any]:
+ """Run a single repetition with the given parameters."""
+ # Extract parameters
+ learner_config = dml_params["learners"]
+ learner_g_name, ml_g = create_learner_from_config(learner_config["ml_g"])
+ learner_m_name, ml_m = create_learner_from_config(learner_config["ml_m"])
+ treatment_level = dml_params["treatment_level"]
+ trimming_threshold = dml_params["trimming_threshold"]
+
+ # Model
+ dml_model = dml.DoubleMLAPO(
+ obj_dml_data=dml_data,
+ ml_g=ml_g,
+ ml_m=ml_m,
+ treatment_level=treatment_level,
+ trimming_threshold=trimming_threshold,
+ )
+ dml_model.fit()
+
+ result = {
+ "coverage": [],
+ }
+ for level in self.confidence_parameters["level"]:
+ level_result = dict()
+ level_result["coverage"] = self._compute_coverage(
+ thetas=dml_model.coef,
+ oracle_thetas=self.oracle_values["apos"][treatment_level],
+ confint=dml_model.confint(level=level),
+ joint_confint=None,
+ )
+
+ # add parameters to the result
+ for res_metric in level_result.values():
+ res_metric.update(
+ {
+ "Learner g": learner_g_name,
+ "Learner m": learner_m_name,
+ "Treatment Level": treatment_level,
+ "level": level,
+ }
+ )
+ for key, res in level_result.items():
+ result[key].append(res)
+
+ return result
+
+ def summarize_results(self):
+ """Summarize the simulation results."""
+ self.logger.info("Summarizing simulation results")
+
+ # Group by parameter combinations
+ groupby_cols = ["Learner g", "Learner m", "Treatment Level", "level"]
+ aggregation_dict = {
+ "Coverage": "mean",
+ "CI Length": "mean",
+ "Bias": "mean",
+ "repetition": "count",
+ }
+
+ # Aggregate results (possibly multiple result dfs)
+ result_summary = dict()
+ for result_name, result_df in self.results.items():
+ result_summary[result_name] = result_df.groupby(groupby_cols).agg(aggregation_dict).reset_index()
+ self.logger.debug(f"Summarized {result_name} results")
+
+ return result_summary
+
+ def _generate_dml_data(self, dgp_params: Dict[str, Any]) -> dml.DoubleMLData:
+ """Generate data for the simulation."""
+ data = make_irm_data_discrete_treatments(
+ n_obs=dgp_params["n_obs"],
+ n_levels=dgp_params["n_levels"],
+ linear=dgp_params["linear"],
+ )
+ df_apo = pd.DataFrame(
+ np.column_stack((data["y"], data["d"], data["x"])),
+ columns=["y", "d"] + ["x" + str(i) for i in range(data["x"].shape[1])],
+ )
+ dml_data = dml.DoubleMLData(df_apo, "y", "d")
+ return dml_data
diff --git a/monte-cover/src/montecover/irm/apos.py b/monte-cover/src/montecover/irm/apos.py
new file mode 100644
index 0000000..4b19deb
--- /dev/null
+++ b/monte-cover/src/montecover/irm/apos.py
@@ -0,0 +1,164 @@
+from typing import Any, Dict, Optional
+
+import doubleml as dml
+import numpy as np
+import pandas as pd
+from doubleml.datasets import make_irm_data_discrete_treatments
+
+from montecover.base import BaseSimulation
+from montecover.utils import create_learner_from_config
+
+
+class APOSCoverageSimulation(BaseSimulation):
+ """Simulation class for coverage properties of DoubleMLAPOs for APO estimation."""
+
+ def __init__(
+ self,
+ config_file: str,
+ suppress_warnings: bool = True,
+ log_level: str = "INFO",
+ log_file: Optional[str] = None,
+ ):
+ super().__init__(
+ config_file=config_file,
+ suppress_warnings=suppress_warnings,
+ log_level=log_level,
+ log_file=log_file,
+ )
+
+ # Calculate oracle values
+ self._calculate_oracle_values()
+
+ def _process_config_parameters(self):
+ """Process simulation-specific parameters from config"""
+ # Process ML models in parameter grid
+ assert "learners" in self.dml_parameters, "No learners specified in the config file"
+
+ required_learners = ["ml_g", "ml_m"]
+ for learner in self.dml_parameters["learners"]:
+ for ml in required_learners:
+ assert ml in learner, f"No {ml} specified in the config file"
+
+ def _calculate_oracle_values(self):
+ """Calculate oracle values for the simulation."""
+ self.logger.info("Calculating oracle values")
+
+ n_levels = self.dgp_parameters["n_levels"][0]
+ data_apo_oracle = make_irm_data_discrete_treatments(
+ n_obs=int(1e6), n_levels=n_levels, linear=self.dgp_parameters["linear"][0]
+ )
+
+ y0 = data_apo_oracle["oracle_values"]["y0"]
+ ite = data_apo_oracle["oracle_values"]["ite"]
+ d = data_apo_oracle["d"]
+
+ average_ites = np.full(n_levels + 1, np.nan)
+ apos = np.full(n_levels + 1, np.nan)
+ for i in range(n_levels + 1):
+ average_ites[i] = np.mean(ite[d == i]) * (i > 0)
+ apos[i] = np.mean(y0) + average_ites[i]
+
+ ates = np.full(n_levels, np.nan)
+ for i in range(n_levels):
+ ates[i] = apos[i + 1] - apos[0]
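+        # Same oracle construction as in apo.py: APO(0) = E[y0], APO(k) = E[y0] +
+        # E[ite | d == k] for k > 0, and ATE(k) = APO(k) - APO(0).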
+
+ self.logger.info(f"Levels and their counts:\n{np.unique(d, return_counts=True)}")
+ self.logger.info(f"True APOs: {apos}")
+ self.logger.info(f"True ATEs: {ates}")
+
+ self.oracle_values = dict()
+ self.oracle_values["apos"] = apos
+ self.oracle_values["ates"] = ates
+
+ def run_single_rep(self, dml_data: dml.DoubleMLData, dml_params: Dict[str, Any]) -> Dict[str, Any]:
+ """Run a single repetition with the given parameters."""
+ # Extract parameters
+ learner_config = dml_params["learners"]
+ learner_g_name, ml_g = create_learner_from_config(learner_config["ml_g"])
+ learner_m_name, ml_m = create_learner_from_config(learner_config["ml_m"])
+ treatment_levels = dml_params["treatment_levels"]
+ trimming_threshold = dml_params["trimming_threshold"]
+
+ # Model
+ dml_model = dml.DoubleMLAPOS(
+ obj_dml_data=dml_data,
+ ml_g=ml_g,
+ ml_m=ml_m,
+ treatment_levels=treatment_levels,
+ trimming_threshold=trimming_threshold,
+ )
+ dml_model.fit()
+ dml_model.bootstrap(n_rep_boot=2000)
+
+ causal_contrast_model = dml_model.causal_contrast(reference_levels=0)
+ causal_contrast_model.bootstrap(n_rep_boot=2000)
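+        # causal_contrast(reference_levels=0) estimates APO(k) - APO(0) for all k > 0,
+        # matching the oracle "ates"; its own bootstrap draws enable joint confidence
+        # bands across the contrasts.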
+
+ result = {
+ "coverage": [],
+ "causal_contrast": [],
+ }
+ for level in self.confidence_parameters["level"]:
+ level_result = dict()
+ level_result["coverage"] = self._compute_coverage(
+ thetas=dml_model.coef,
+ oracle_thetas=self.oracle_values["apos"],
+ confint=dml_model.confint(level=level),
+ joint_confint=dml_model.confint(level=level, joint=True),
+ )
+ level_result["causal_contrast"] = self._compute_coverage(
+ thetas=causal_contrast_model.thetas,
+ oracle_thetas=self.oracle_values["ates"],
+ confint=causal_contrast_model.confint(level=level),
+ joint_confint=causal_contrast_model.confint(level=level, joint=True),
+ )
+
+ # add parameters to the result
+ for res_metric in level_result.values():
+ res_metric.update(
+ {
+ "Learner g": learner_g_name,
+ "Learner m": learner_m_name,
+ "level": level,
+ }
+ )
+ for key, res in level_result.items():
+ result[key].append(res)
+
+ return result
+
+ def summarize_results(self):
+ """Summarize the simulation results."""
+ self.logger.info("Summarizing simulation results")
+
+ # Group by parameter combinations
+ groupby_cols = ["Learner g", "Learner m", "level"]
+ aggregation_dict = {
+ "Coverage": "mean",
+ "CI Length": "mean",
+ "Bias": "mean",
+ "Uniform Coverage": "mean",
+ "Uniform CI Length": "mean",
+ "repetition": "count",
+ }
+
+ # Aggregate results (possibly multiple result dfs)
+ result_summary = dict()
+ for result_name, result_df in self.results.items():
+ result_summary[result_name] = result_df.groupby(groupby_cols).agg(aggregation_dict).reset_index()
+ self.logger.debug(f"Summarized {result_name} results")
+
+ return result_summary
+
+ def _generate_dml_data(self, dgp_params: Dict[str, Any]) -> dml.DoubleMLData:
+ """Generate data for the simulation."""
+ data = make_irm_data_discrete_treatments(
+ n_obs=dgp_params["n_obs"],
+ n_levels=dgp_params["n_levels"],
+ linear=dgp_params["linear"],
+ )
+ df_apo = pd.DataFrame(
+ np.column_stack((data["y"], data["d"], data["x"])),
+ columns=["y", "d"] + ["x" + str(i) for i in range(data["x"].shape[1])],
+ )
+ dml_data = dml.DoubleMLData(df_apo, "y", "d")
+ return dml_data
diff --git a/monte-cover/src/montecover/irm/cvar.py b/monte-cover/src/montecover/irm/cvar.py
new file mode 100644
index 0000000..19180c0
--- /dev/null
+++ b/monte-cover/src/montecover/irm/cvar.py
@@ -0,0 +1,214 @@
+from typing import Any, Dict, Optional
+
+import doubleml as dml
+import numpy as np
+import pandas as pd
+
+from montecover.base import BaseSimulation
+from montecover.utils import create_learner_from_config
+
+
+# define loc-scale model
+def f_loc(D, X):
+ loc = 0.5 * D + 2 * D * X[:, 4] + 2.0 * (X[:, 1] > 0.1) - 1.7 * (X[:, 0] * X[:, 2] > 0) - 3 * X[:, 3]
+ return loc
+
+
+def f_scale(D, X):
+ scale = np.sqrt(0.5 * D + 0.3 * D * X[:, 1] + 2)
+ return scale
+
+
+def dgp(n=200, p=5):
+ X = np.random.uniform(-1, 1, size=[n, p])
+ D = ((X[:, 1] - X[:, 3] + 1.5 * (X[:, 0] > 0) + np.random.normal(size=n)) > 0) * 1.0
+ epsilon = np.random.normal(size=n)
+
+ Y = f_loc(D, X) + f_scale(D, X) * epsilon
+ return Y, X, D, epsilon
+
+
+class CVARCoverageSimulation(BaseSimulation):
+ """Simulation class for coverage properties of DoubleMLCVAR for Conditional Value at Risk estimation."""
+
+ def __init__(
+ self,
+ config_file: str,
+ suppress_warnings: bool = True,
+ log_level: str = "INFO",
+ log_file: Optional[str] = None,
+ ):
+ super().__init__(
+ config_file=config_file,
+ suppress_warnings=suppress_warnings,
+ log_level=log_level,
+ log_file=log_file,
+ )
+
+ # Calculate oracle values
+ self._calculate_oracle_values()
+
+ def _process_config_parameters(self):
+ """Process simulation-specific parameters from config"""
+ # Process ML models in parameter grid
+ assert "learners" in self.dml_parameters, "No learners specified in the config file"
+
+ required_learners = ["ml_g", "ml_m"]
+ for learner in self.dml_parameters["learners"]:
+ for ml in required_learners:
+ assert ml in learner, f"No {ml} specified in the config file"
+
+ def _calculate_oracle_values(self):
+ """Calculate oracle values for the simulation."""
+ self.logger.info("Calculating oracle values")
+
+ # Parameters
+ n_true = int(10e6)
+ tau_vec = self.dml_parameters["tau_vec"][0]
+ p = self.dgp_parameters["dim_x"][0]
+
+ _, X_true, _, epsilon_true = dgp(n=n_true, p=p)
+ D1 = np.ones(n_true)
+ D0 = np.zeros(n_true)
+
+ Y1 = f_loc(D1, X_true) + f_scale(D1, X_true) * epsilon_true
+ Y0 = f_loc(D0, X_true) + f_scale(D0, X_true) * epsilon_true
+
+ Y1_quant = np.quantile(Y1, q=tau_vec)
+ Y0_quant = np.quantile(Y0, q=tau_vec)
+ Y1_cvar = [Y1[Y1 >= quant].mean() for quant in Y1_quant]
+ Y0_cvar = [Y0[Y0 >= quant].mean() for quant in Y0_quant]
+ effect_cvar = np.array(Y1_cvar) - np.array(Y0_cvar)
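+        # CVaR identity used above: CVaR_tau(Y) = E[Y | Y >= q_tau(Y)], approximated
+        # by averaging the oracle draws above the empirical tau-quantile; the effect
+        # is the difference of the potential-outcome CVaRs.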
+
+ self.oracle_values = dict()
+ self.oracle_values["effect_cvar"] = effect_cvar
+ self.oracle_values["Y1_cvar"] = Y1_cvar
+ self.oracle_values["Y0_cvar"] = Y0_cvar
+
+ self.logger.info(f"Oracle values: {self.oracle_values}")
+
+ def run_single_rep(self, dml_data: dml.DoubleMLData, dml_params: Dict[str, Any]) -> Dict[str, Any]:
+ """Run a single repetition with the given parameters."""
+ # Extract parameters
+ learner_config = dml_params["learners"]
+ learner_g_name, ml_g = create_learner_from_config(learner_config["ml_g"])
+ learner_m_name, ml_m = create_learner_from_config(learner_config["ml_m"])
+ tau_vec = dml_params["tau_vec"]
+ trimming_threshold = dml_params["trimming_threshold"]
+ Y0_cvar = self.oracle_values["Y0_cvar"]
+ Y1_cvar = self.oracle_values["Y1_cvar"]
+ effect_cvar = self.oracle_values["effect_cvar"]
+
+ # Model
+ dml_model = dml.DoubleMLQTE(
+ obj_dml_data=dml_data,
+ ml_g=ml_g,
+ ml_m=ml_m,
+ score="CVaR",
+ quantiles=tau_vec,
+ trimming_threshold=trimming_threshold,
+ )
+ dml_model.fit()
+ dml_model.bootstrap(n_rep_boot=2000)
+
+ result = {
+ "Y0_coverage": [],
+ "Y1_coverage": [],
+ "effect_coverage": [],
+ }
+ for level in self.confidence_parameters["level"]:
+ level_result = dict()
+ level_result["effect_coverage"] = self._compute_coverage(
+ thetas=dml_model.coef,
+ oracle_thetas=effect_cvar,
+ confint=dml_model.confint(level=level),
+ joint_confint=dml_model.confint(level=level, joint=True),
+ )
+
+ Y0_estimates = np.full(len(tau_vec), np.nan)
+ Y1_estimates = np.full(len(tau_vec), np.nan)
+
+ Y0_confint = np.full((len(tau_vec), 2), np.nan)
+ Y1_confint = np.full((len(tau_vec), 2), np.nan)
+
+ for tau_idx in range(len(tau_vec)):
+ model_Y0 = dml_model.modellist_0[tau_idx]
+ model_Y1 = dml_model.modellist_1[tau_idx]
+
+ Y0_estimates[tau_idx] = model_Y0.coef
+ Y1_estimates[tau_idx] = model_Y1.coef
+
+ Y0_confint[tau_idx, :] = model_Y0.confint(level=level)
+ Y1_confint[tau_idx, :] = model_Y1.confint(level=level)
+
+ Y0_confint_df = pd.DataFrame(Y0_confint, columns=["lower", "upper"])
+ Y1_confint_df = pd.DataFrame(Y1_confint, columns=["lower", "upper"])
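+                # modellist_0 / modellist_1 hold the fitted per-quantile models for
+                # the potential outcomes under control and treatment; only pointwise
+                # intervals are collected for them, while joint bands are reserved
+                # for the CVaR effects above.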
+
+ level_result["Y0_coverage"] = self._compute_coverage(
+ thetas=Y0_estimates,
+ oracle_thetas=Y0_cvar,
+ confint=Y0_confint_df,
+ joint_confint=None,
+ )
+
+ level_result["Y1_coverage"] = self._compute_coverage(
+ thetas=Y1_estimates,
+ oracle_thetas=Y1_cvar,
+ confint=Y1_confint_df,
+ joint_confint=None,
+ )
+
+ # add parameters to the result
+ for res_metric in level_result.values():
+ res_metric.update(
+ {
+ "Learner g": learner_g_name,
+ "Learner m": learner_m_name,
+ "level": level,
+ }
+ )
+ for key, res in level_result.items():
+ result[key].append(res)
+
+ return result
+
+ def summarize_results(self):
+ """Summarize the simulation results."""
+ self.logger.info("Summarizing simulation results")
+
+ # Group by parameter combinations
+ groupby_cols = ["Learner g", "Learner m", "level"]
+ aggregation_dict = {
+ "Coverage": "mean",
+ "CI Length": "mean",
+ "Bias": "mean",
+ "repetition": "count",
+ }
+
+ result_summary = dict()
+ # Aggregate results for Y0 and Y1
+ for result_name in ["Y0_coverage", "Y1_coverage"]:
+ df = self.results[result_name]
+ result_summary[result_name] = df.groupby(groupby_cols).agg(aggregation_dict).reset_index()
+ self.logger.debug(f"Summarized {result_name} results")
+
+ uniform_aggregation_dict = {
+ "Coverage": "mean",
+ "CI Length": "mean",
+ "Bias": "mean",
+ "Uniform Coverage": "mean",
+ "Uniform CI Length": "mean",
+ "repetition": "count",
+ }
+ result_summary["effect_coverage"] = (
+ self.results["effect_coverage"].groupby(groupby_cols).agg(uniform_aggregation_dict).reset_index()
+ )
+ self.logger.debug("Summarized effect_coverage results")
+
+ return result_summary
+
+ def _generate_dml_data(self, dgp_params: Dict[str, Any]) -> dml.DoubleMLData:
+ """Generate data for the simulation."""
+ Y, X, D, _ = dgp(n=dgp_params["n_obs"], p=dgp_params["dim_x"])
+ dml_data = dml.DoubleMLData.from_arrays(X, Y, D)
+ return dml_data
diff --git a/monte-cover/src/montecover/irm/iivm_late.py b/monte-cover/src/montecover/irm/iivm_late.py
new file mode 100644
index 0000000..2f1ac1f
--- /dev/null
+++ b/monte-cover/src/montecover/irm/iivm_late.py
@@ -0,0 +1,122 @@
+from typing import Any, Dict, Optional
+
+import doubleml as dml
+from doubleml.datasets import make_iivm_data
+
+from montecover.base import BaseSimulation
+from montecover.utils import create_learner_from_config
+
+
+class IIVMLATECoverageSimulation(BaseSimulation):
+ """Simulation class for coverage properties of DoubleMLIIVM for LATE estimation."""
+
+ def __init__(
+ self,
+ config_file: str,
+ suppress_warnings: bool = True,
+ log_level: str = "INFO",
+ log_file: Optional[str] = None,
+ ):
+ super().__init__(
+ config_file=config_file,
+ suppress_warnings=suppress_warnings,
+ log_level=log_level,
+ log_file=log_file,
+ )
+
+ # Calculate oracle values
+ self._calculate_oracle_values()
+
+ def _process_config_parameters(self):
+ """Process simulation-specific parameters from config"""
+ # Process ML models in parameter grid
+ assert "learners" in self.dml_parameters, "No learners specified in the config file"
+
+ required_learners = ["ml_g", "ml_m", "ml_r"]
+ for learner in self.dml_parameters["learners"]:
+ for ml in required_learners:
+ assert ml in learner, f"No {ml} specified in the config file"
+
+ def _calculate_oracle_values(self):
+ """Calculate oracle values for the simulation."""
+ self.logger.info("Calculating oracle values")
+
+ self.oracle_values = dict()
+ self.oracle_values["theta"] = self.dgp_parameters["theta"]
+
+ def run_single_rep(self, dml_data: dml.DoubleMLData, dml_params: Dict[str, Any]) -> Dict[str, Any]:
+ """Run a single repetition with the given parameters."""
+ # Extract parameters
+ learner_config = dml_params["learners"]
+ learner_g_name, ml_g = create_learner_from_config(learner_config["ml_g"])
+ learner_m_name, ml_m = create_learner_from_config(learner_config["ml_m"])
+ learner_r_name, ml_r = create_learner_from_config(learner_config["ml_r"])
+
+ # Model
+ dml_model = dml.DoubleMLIIVM(
+ obj_dml_data=dml_data,
+ ml_g=ml_g,
+ ml_m=ml_m,
+ ml_r=ml_r,
+ )
+ dml_model.fit()
+
+ result = {
+ "coverage": [],
+ }
+ for level in self.confidence_parameters["level"]:
+ level_result = dict()
+ level_result["coverage"] = self._compute_coverage(
+ thetas=dml_model.coef,
+ oracle_thetas=self.oracle_values["theta"],
+ confint=dml_model.confint(level=level),
+ joint_confint=None,
+ )
+
+ # add parameters to the result
+ for res_metric in level_result.values():
+ res_metric.update(
+ {
+ "Learner g": learner_g_name,
+ "Learner m": learner_m_name,
+ "Learner r": learner_r_name,
+ "level": level,
+ }
+ )
+ for key, res in level_result.items():
+ result[key].append(res)
+
+ return result
+
+ def summarize_results(self):
+ """Summarize the simulation results."""
+ self.logger.info("Summarizing simulation results")
+
+ # Group by parameter combinations
+ groupby_cols = ["Learner g", "Learner m", "Learner r", "level"]
+ aggregation_dict = {
+ "Coverage": "mean",
+ "CI Length": "mean",
+ "Bias": "mean",
+ "repetition": "count",
+ }
+
+ # Aggregate results (possibly multiple result dfs)
+ result_summary = dict()
+ for result_name, result_df in self.results.items():
+ result_summary[result_name] = result_df.groupby(groupby_cols).agg(aggregation_dict).reset_index()
+ self.logger.debug(f"Summarized {result_name} results")
+
+ return result_summary
+
+ def _generate_dml_data(self, dgp_params: Dict[str, Any]) -> dml.DoubleMLData:
+ """Generate data for the simulation."""
+ data = make_iivm_data(
+ theta=dgp_params["theta"],
+ n_obs=dgp_params["n_obs"],
+ dim_x=dgp_params["dim_x"],
+ alpha_x=dgp_params["alpha_x"],
+ return_type="DataFrame",
+ )
+ dml_data = dml.DoubleMLData(data, "y", "d", z_cols="z")
+ return dml_data
diff --git a/monte-cover/src/montecover/irm/irm_ate.py b/monte-cover/src/montecover/irm/irm_ate.py
new file mode 100644
index 0000000..09b3f83
--- /dev/null
+++ b/monte-cover/src/montecover/irm/irm_ate.py
@@ -0,0 +1,118 @@
+from typing import Any, Dict, Optional
+
+import doubleml as dml
+from doubleml.datasets import make_irm_data
+
+from montecover.base import BaseSimulation
+from montecover.utils import create_learner_from_config
+
+
+class IRMATECoverageSimulation(BaseSimulation):
+ """Simulation class for coverage properties of DoubleMLIRM for ATE estimation."""
+
+ def __init__(
+ self,
+ config_file: str,
+ suppress_warnings: bool = True,
+ log_level: str = "INFO",
+ log_file: Optional[str] = None,
+ ):
+ super().__init__(
+ config_file=config_file,
+ suppress_warnings=suppress_warnings,
+ log_level=log_level,
+ log_file=log_file,
+ )
+
+ # Calculate oracle values
+ self._calculate_oracle_values()
+
+ def _process_config_parameters(self):
+ """Process simulation-specific parameters from config"""
+ # Process ML models in parameter grid
+ assert "learners" in self.dml_parameters, "No learners specified in the config file"
+
+ required_learners = ["ml_g", "ml_m"]
+ for learner in self.dml_parameters["learners"]:
+ for ml in required_learners:
+ assert ml in learner, f"No {ml} specified in the config file"
+
+ def _calculate_oracle_values(self):
+ """Calculate oracle values for the simulation."""
+ self.logger.info("Calculating oracle values")
+
+ self.oracle_values = dict()
+ self.oracle_values["theta"] = self.dgp_parameters["theta"]
+
+ def run_single_rep(self, dml_data: dml.DoubleMLData, dml_params: Dict[str, Any]) -> Dict[str, Any]:
+ """Run a single repetition with the given parameters."""
+ # Extract parameters
+ learner_config = dml_params["learners"]
+ learner_g_name, ml_g = create_learner_from_config(learner_config["ml_g"])
+ learner_m_name, ml_m = create_learner_from_config(learner_config["ml_m"])
+
+ # Model
+ dml_model = dml.DoubleMLIRM(
+ obj_dml_data=dml_data,
+ ml_g=ml_g,
+ ml_m=ml_m,
+ )
+ dml_model.fit()
+
+ result = {
+ "coverage": [],
+ }
+ for level in self.confidence_parameters["level"]:
+ level_result = dict()
+ level_result["coverage"] = self._compute_coverage(
+ thetas=dml_model.coef,
+ oracle_thetas=self.oracle_values["theta"],
+ confint=dml_model.confint(level=level),
+ joint_confint=None,
+ )
+
+ # add parameters to the result
+ for res_metric in level_result.values():
+ res_metric.update(
+ {
+ "Learner g": learner_g_name,
+ "Learner m": learner_m_name,
+ "level": level,
+ }
+ )
+ for key, res in level_result.items():
+ result[key].append(res)
+
+ return result
+
+ def summarize_results(self):
+ """Summarize the simulation results."""
+ self.logger.info("Summarizing simulation results")
+
+ # Group by parameter combinations
+ groupby_cols = ["Learner g", "Learner m", "level"]
+ aggregation_dict = {
+ "Coverage": "mean",
+ "CI Length": "mean",
+ "Bias": "mean",
+ "repetition": "count",
+ }
+
+ # Aggregate results (possibly multiple result dfs)
+ result_summary = dict()
+ for result_name, result_df in self.results.items():
+ result_summary[result_name] = result_df.groupby(groupby_cols).agg(aggregation_dict).reset_index()
+ self.logger.debug(f"Summarized {result_name} results")
+
+ return result_summary
+
+ def _generate_dml_data(self, dgp_params: Dict[str, Any]) -> dml.DoubleMLData:
+ """Generate data for the simulation."""
+ data = make_irm_data(
+ theta=dgp_params["theta"],
+ n_obs=dgp_params["n_obs"],
+ dim_x=dgp_params["dim_x"],
+ return_type="DataFrame",
+ )
+ dml_data = dml.DoubleMLData(data, "y", "d")
+ return dml_data
diff --git a/monte-cover/src/montecover/irm/irm_ate_sensitivity.py b/monte-cover/src/montecover/irm/irm_ate_sensitivity.py
new file mode 100644
index 0000000..09ca004
--- /dev/null
+++ b/monte-cover/src/montecover/irm/irm_ate_sensitivity.py
@@ -0,0 +1,172 @@
+from typing import Any, Dict, Optional
+
+import doubleml as dml
+import numpy as np
+import pandas as pd
+from doubleml.datasets import make_confounded_irm_data
+
+from montecover.base import BaseSimulation
+from montecover.utils import create_learner_from_config
+
+
+class IRMATESensitivityCoverageSimulation(BaseSimulation):
+ """Simulation class for sensitivity properties of DoubleMLIRM for ATE estimation."""
+
+ def __init__(
+ self,
+ config_file: str,
+ suppress_warnings: bool = True,
+ log_level: str = "INFO",
+ log_file: Optional[str] = None,
+ ):
+ super().__init__(
+ config_file=config_file,
+ suppress_warnings=suppress_warnings,
+ log_level=log_level,
+ log_file=log_file,
+ )
+
+ # Calculate oracle values
+ self._calculate_oracle_values()
+
+ def _process_config_parameters(self):
+ """Process simulation-specific parameters from config"""
+ # Process ML models in parameter grid
+ assert "learners" in self.dml_parameters, "No learners specified in the config file"
+
+ required_learners = ["ml_g", "ml_m"]
+ for learner in self.dml_parameters["learners"]:
+ for ml in required_learners:
+ assert ml in learner, f"No {ml} specified in the config file"
+
+ def _calculate_oracle_values(self):
+ """Calculate oracle values for the simulation."""
+ self.logger.info("Calculating oracle values")
+
+ dgp_dict = make_confounded_irm_data(
+ n_obs=int(1e6),
+ theta=self.dgp_parameters["theta"][0],
+ gamma_a=self.dgp_parameters["gamma_a"][0],
+ beta_a=self.dgp_parameters["beta_a"][0],
+ var_epsilon_y=self.dgp_parameters["var_epsilon_y"][0],
+ trimming_threshold=self.dgp_parameters["trimming_threshold"][0],
+ linear=self.dgp_parameters["linear"][0],
+ )
+
+ self.oracle_values = {
+ "theta": self.dgp_parameters["theta"],
+ "cf_y": dgp_dict["oracle_values"]["cf_y"],
+ "cf_d": dgp_dict["oracle_values"]["cf_d_ate"],
+ "rho": dgp_dict["oracle_values"]["rho_ate"],
+ }
+ self.logger.info(f"Oracle values: {self.oracle_values}")
+
+ def run_single_rep(self, dml_data, dml_params) -> Dict[str, Any]:
+ """Run a single repetition with the given parameters."""
+ # Extract parameters
+ learner_config = dml_params["learners"]
+ learner_g_name, ml_g = create_learner_from_config(learner_config["ml_g"])
+ learner_m_name, ml_m = create_learner_from_config(learner_config["ml_m"])
+ trimming_threshold = dml_params["trimming_threshold"]
+ theta = self.oracle_values["theta"][0]
+
+ # Model
+ dml_model = dml.DoubleMLIRM(
+ obj_dml_data=dml_data,
+ ml_g=ml_g,
+ ml_m=ml_m,
+ score="ATE",
+ trimming_threshold=trimming_threshold,
+ )
+ dml_model.fit()
+
+ result = {
+ "coverage": [],
+ }
+ for level in self.confidence_parameters["level"]:
+ level_result = dict()
+ level_result["coverage"] = self._compute_coverage(
+ thetas=dml_model.coef,
+ oracle_thetas=theta,
+ confint=dml_model.confint(level=level),
+ joint_confint=None,
+ )
+
+            # sensitivity analysis
+ dml_model.sensitivity_analysis(
+ cf_y=self.oracle_values["cf_y"],
+ cf_d=self.oracle_values["cf_d"],
+ rho=self.oracle_values["rho"],
+ level=level,
+ null_hypothesis=theta,
+ )
+ sensitivity_results = {
+ "Coverage (Lower)": theta >= dml_model.sensitivity_params["ci"]["lower"][0],
+ "Coverage (Upper)": theta <= dml_model.sensitivity_params["ci"]["upper"][0],
+ "RV": dml_model.sensitivity_params["rv"][0],
+ "RVa": dml_model.sensitivity_params["rva"][0],
+ "Bias (Lower)": abs(theta - dml_model.sensitivity_params["theta"]["lower"][0]),
+ "Bias (Upper)": abs(theta - dml_model.sensitivity_params["theta"]["upper"][0]),
+ }
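+            # RV is the robustness value (the confounding strength needed to explain
+            # away the estimated effect) and RVa its analogue accounting for
+            # estimation uncertainty; the coverage flags check whether the one-sided
+            # sensitivity bounds still cover the true theta at the oracle confounding
+            # strength.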
+ # add sensitivity results to the level result coverage
+ level_result["coverage"].update(sensitivity_results)
+
+ # add parameters to the result
+ for res in level_result.values():
+ res.update(
+ {
+ "Learner g": learner_g_name,
+ "Learner m": learner_m_name,
+ "level": level,
+ }
+ )
+ for key, res in level_result.items():
+ result[key].append(res)
+
+ return result
+
+ def summarize_results(self):
+ """Summarize the simulation results."""
+ self.logger.info("Summarizing simulation results")
+
+ # Group by parameter combinations
+ groupby_cols = ["Learner g", "Learner m", "level"]
+ aggregation_dict = {
+ "Coverage": "mean",
+ "CI Length": "mean",
+ "Bias": "mean",
+ "Coverage (Lower)": "mean",
+ "Coverage (Upper)": "mean",
+ "RV": "mean",
+ "RVa": "mean",
+ "Bias (Lower)": "mean",
+ "Bias (Upper)": "mean",
+ "repetition": "count",
+ }
+
+ # Aggregate results (possibly multiple result dfs)
+ result_summary = dict()
+ for result_name, result_df in self.results.items():
+ result_summary[result_name] = result_df.groupby(groupby_cols).agg(aggregation_dict).reset_index()
+ self.logger.debug(f"Summarized {result_name} results")
+
+ return result_summary
+
+ def _generate_dml_data(self, dgp_params) -> dml.DoubleMLData:
+ """Generate data for the simulation."""
+ dgp_dict = make_confounded_irm_data(
+ n_obs=dgp_params["n_obs"],
+ theta=dgp_params["theta"],
+ gamma_a=dgp_params["gamma_a"],
+ beta_a=dgp_params["beta_a"],
+ var_epsilon_y=dgp_params["var_epsilon_y"],
+ trimming_threshold=dgp_params["trimming_threshold"],
+ linear=dgp_params["linear"],
+ )
+ x_cols = [f"X{i + 1}" for i in np.arange(dgp_dict["x"].shape[1])]
+ df = pd.DataFrame(
+ np.column_stack((dgp_dict["x"], dgp_dict["y"], dgp_dict["d"])),
+ columns=x_cols + ["y", "d"],
+ )
+ dml_data = dml.DoubleMLData(df, "y", "d")
+ return dml_data
diff --git a/monte-cover/src/montecover/irm/irm_atte.py b/monte-cover/src/montecover/irm/irm_atte.py
new file mode 100644
index 0000000..4dbb449
--- /dev/null
+++ b/monte-cover/src/montecover/irm/irm_atte.py
@@ -0,0 +1,161 @@
+from typing import Any, Dict, Optional
+
+import doubleml as dml
+import numpy as np
+from doubleml.datasets import make_irm_data
+from scipy.linalg import toeplitz
+
+from montecover.base import BaseSimulation
+from montecover.utils import create_learner_from_config
+
+
+class IRMATTECoverageSimulation(BaseSimulation):
+ """Simulation class for coverage properties of DoubleMLIRM for ATTE estimation."""
+
+ def __init__(
+ self,
+ config_file: str,
+ suppress_warnings: bool = True,
+ log_level: str = "INFO",
+ log_file: Optional[str] = None,
+ ):
+ super().__init__(
+ config_file=config_file,
+ suppress_warnings=suppress_warnings,
+ log_level=log_level,
+ log_file=log_file,
+ )
+
+ # Calculate oracle values
+ self._calculate_oracle_values()
+
+ def _process_config_parameters(self):
+ """Process simulation-specific parameters from config"""
+ # Process ML models in parameter grid
+ assert "learners" in self.dml_parameters, "No learners specified in the config file"
+
+ required_learners = ["ml_g", "ml_m"]
+ for learner in self.dml_parameters["learners"]:
+ for ml in required_learners:
+ assert ml in learner, f"No {ml} specified in the config file"
+
+ def _calculate_oracle_values(self):
+ """Calculate oracle values for the simulation."""
+ self.logger.info("Calculating oracle values")
+
+ theta = self.dgp_parameters["theta"][0]
+ dim_x = self.dgp_parameters["dim_x"][0]
+
+ n_obs_atte = int(1e6)
+ R2_d = 0.5
+ R2_y = 0.5
+
+        v = np.random.uniform(size=n_obs_atte)
+        zeta = np.random.standard_normal(size=n_obs_atte)
+
+        cov_mat = toeplitz([np.power(0.5, k) for k in range(dim_x)])
+        x = np.random.multivariate_normal(np.zeros(dim_x), cov_mat, size=n_obs_atte)
+
+ beta = [1 / (k**2) for k in range(1, dim_x + 1)]
+ b_sigma_b = np.dot(np.dot(cov_mat, beta), beta)
+ c_y = np.sqrt(R2_y / ((1 - R2_y) * b_sigma_b))
+ c_d = np.sqrt(np.pi**2 / 3.0 * R2_d / ((1 - R2_d) * b_sigma_b))
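+        # c_y and c_d rescale the linear index so that the outcome and treatment
+        # equations reach the targeted R^2 values; pi^2 / 3 is the variance of the
+        # standard logistic error implicit in the propensity below.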
+
+ xx = np.exp(np.dot(x, np.multiply(beta, c_d)))
+ d = 1.0 * ((xx / (1 + xx)) > v)
+
+ # y = d * theta + d * np.dot(x, np.multiply(beta, c_y)) + zeta
+ y0 = zeta
+ y1 = theta + np.dot(x, np.multiply(beta, c_y)) + zeta
+
+ self.oracle_values = dict()
+ self.oracle_values["theta"] = np.mean(y1[d == 1] - y0[d == 1])
+ self.logger.info(f"Oracle ATTE value: {self.oracle_values['theta']}")
+
+ def run_single_rep(self, dml_data: dml.DoubleMLData, dml_params: Dict[str, Any]) -> Dict[str, Any]:
+ """Run a single repetition with the given parameters."""
+ # Extract parameters
+ learner_config = dml_params["learners"]
+ learner_g_name, ml_g = create_learner_from_config(learner_config["ml_g"])
+ learner_m_name, ml_m = create_learner_from_config(learner_config["ml_m"])
+
+ # Model
+ dml_model = dml.DoubleMLIRM(
+ obj_dml_data=dml_data,
+ ml_g=ml_g,
+ ml_m=ml_m,
+ score="ATTE",
+ )
+ dml_model.fit()
+
+ result = {
+ "coverage": [],
+ }
+ for level in self.confidence_parameters["level"]:
+ level_result = dict()
+ level_result["coverage"] = self._compute_coverage(
+ thetas=dml_model.coef,
+ oracle_thetas=self.oracle_values["theta"],
+ confint=dml_model.confint(level=level),
+ joint_confint=None,
+ )
+
+ # add parameters to the result
+ for res_metric in level_result.values():
+ res_metric.update(
+ {
+ "Learner g": learner_g_name,
+ "Learner m": learner_m_name,
+ "level": level,
+ }
+ )
+ for key, res in level_result.items():
+ result[key].append(res)
+
+ return result
+
+ def summarize_results(self):
+ """Summarize the simulation results."""
+ self.logger.info("Summarizing simulation results")
+
+ # Group by parameter combinations
+ groupby_cols = ["Learner g", "Learner m", "level"]
+ aggregation_dict = {
+ "Coverage": "mean",
+ "CI Length": "mean",
+ "Bias": "mean",
+ "repetition": "count",
+ }
+
+ # Aggregate results (possibly multiple result dfs)
+ result_summary = dict()
+ for result_name, result_df in self.results.items():
+ result_summary[result_name] = result_df.groupby(groupby_cols).agg(aggregation_dict).reset_index()
+ self.logger.debug(f"Summarized {result_name} results")
+
+ return result_summary
+
+ def _generate_dml_data(self, dgp_params: Dict[str, Any]) -> dml.DoubleMLData:
+ """Generate data for the simulation."""
+ data = make_irm_data(
+ theta=dgp_params["theta"],
+ n_obs=dgp_params["n_obs"],
+ dim_x=dgp_params["dim_x"],
+ return_type="DataFrame",
+ )
+ dml_data = dml.DoubleMLData(data, "y", "d")
+ return dml_data
diff --git a/monte-cover/src/montecover/irm/irm_atte_sensitivity.py b/monte-cover/src/montecover/irm/irm_atte_sensitivity.py
new file mode 100644
index 0000000..47ec91f
--- /dev/null
+++ b/monte-cover/src/montecover/irm/irm_atte_sensitivity.py
@@ -0,0 +1,172 @@
+from typing import Any, Dict, Optional
+
+import doubleml as dml
+import numpy as np
+import pandas as pd
+from doubleml.datasets import make_confounded_irm_data
+
+from montecover.base import BaseSimulation
+from montecover.utils import create_learner_from_config
+
+
+class IRMATTESensitivityCoverageSimulation(BaseSimulation):
+ """Simulation class for sensitivity properties of DoubleMLIRM for ATE estimation."""
+
+ def __init__(
+ self,
+ config_file: str,
+ suppress_warnings: bool = True,
+ log_level: str = "INFO",
+ log_file: Optional[str] = None,
+ ):
+ super().__init__(
+ config_file=config_file,
+ suppress_warnings=suppress_warnings,
+ log_level=log_level,
+ log_file=log_file,
+ )
+
+ # Calculate oracle values
+ self._calculate_oracle_values()
+
+ def _process_config_parameters(self):
+ """Process simulation-specific parameters from config"""
+ # Process ML models in parameter grid
+ assert "learners" in self.dml_parameters, "No learners specified in the config file"
+
+ required_learners = ["ml_g", "ml_m"]
+ for learner in self.dml_parameters["learners"]:
+ for ml in required_learners:
+ assert ml in learner, f"No {ml} specified in the config file"
+
+ def _calculate_oracle_values(self):
+ """Calculate oracle values for the simulation."""
+ self.logger.info("Calculating oracle values")
+
+ dgp_dict = make_confounded_irm_data(
+ n_obs=int(1e6),
+ theta=self.dgp_parameters["theta"][0],
+ gamma_a=self.dgp_parameters["gamma_a"][0],
+ beta_a=self.dgp_parameters["beta_a"][0],
+ var_epsilon_y=self.dgp_parameters["var_epsilon_y"][0],
+ trimming_threshold=self.dgp_parameters["trimming_threshold"][0],
+ linear=self.dgp_parameters["linear"][0],
+ )
+
+ self.oracle_values = {
+ "theta": self.dgp_parameters["theta"],
+ "cf_y": dgp_dict["oracle_values"]["cf_y"],
+ "cf_d": dgp_dict["oracle_values"]["cf_d_atte"],
+ "rho": dgp_dict["oracle_values"]["rho_atte"],
+ }
+ self.logger.info(f"Oracle values: {self.oracle_values}")
+
+ def run_single_rep(self, dml_data, dml_params) -> Dict[str, Any]:
+ """Run a single repetition with the given parameters."""
+ # Extract parameters
+ learner_config = dml_params["learners"]
+ learner_g_name, ml_g = create_learner_from_config(learner_config["ml_g"])
+ learner_m_name, ml_m = create_learner_from_config(learner_config["ml_m"])
+ trimming_threshold = dml_params["trimming_threshold"]
+ theta = self.oracle_values["theta"][0]
+
+ # Model
+ dml_model = dml.DoubleMLIRM(
+ obj_dml_data=dml_data,
+ ml_g=ml_g,
+ ml_m=ml_m,
+ score="ATTE",
+ trimming_threshold=trimming_threshold,
+ )
+ dml_model.fit()
+
+ result = {
+ "coverage": [],
+ }
+ for level in self.confidence_parameters["level"]:
+ level_result = dict()
+ level_result["coverage"] = self._compute_coverage(
+ thetas=dml_model.coef,
+ oracle_thetas=theta,
+ confint=dml_model.confint(level=level),
+ joint_confint=None,
+ )
+
+            # sensitivity analysis
+ dml_model.sensitivity_analysis(
+ cf_y=self.oracle_values["cf_y"],
+ cf_d=self.oracle_values["cf_d"],
+ rho=self.oracle_values["rho"],
+ level=level,
+ null_hypothesis=theta,
+ )
+ sensitivity_results = {
+ "Coverage (Lower)": theta >= dml_model.sensitivity_params["ci"]["lower"][0],
+ "Coverage (Upper)": theta <= dml_model.sensitivity_params["ci"]["upper"][0],
+ "RV": dml_model.sensitivity_params["rv"][0],
+ "RVa": dml_model.sensitivity_params["rva"][0],
+ "Bias (Lower)": abs(theta - dml_model.sensitivity_params["theta"]["lower"][0]),
+ "Bias (Upper)": abs(theta - dml_model.sensitivity_params["theta"]["upper"][0]),
+ }
+ # add sensitivity results to the level result coverage
+ level_result["coverage"].update(sensitivity_results)
+
+ # add parameters to the result
+ for res in level_result.values():
+ res.update(
+ {
+ "Learner g": learner_g_name,
+ "Learner m": learner_m_name,
+ "level": level,
+ }
+ )
+ for key, res in level_result.items():
+ result[key].append(res)
+
+ return result
+
+ def summarize_results(self):
+ """Summarize the simulation results."""
+ self.logger.info("Summarizing simulation results")
+
+ # Group by parameter combinations
+ groupby_cols = ["Learner g", "Learner m", "level"]
+ aggregation_dict = {
+ "Coverage": "mean",
+ "CI Length": "mean",
+ "Bias": "mean",
+ "Coverage (Lower)": "mean",
+ "Coverage (Upper)": "mean",
+ "RV": "mean",
+ "RVa": "mean",
+ "Bias (Lower)": "mean",
+ "Bias (Upper)": "mean",
+ "repetition": "count",
+ }
+
+ # Aggregate results (possibly multiple result dfs)
+ result_summary = dict()
+ for result_name, result_df in self.results.items():
+ result_summary[result_name] = result_df.groupby(groupby_cols).agg(aggregation_dict).reset_index()
+ self.logger.debug(f"Summarized {result_name} results")
+
+ return result_summary
+
+ def _generate_dml_data(self, dgp_params) -> dml.DoubleMLData:
+ """Generate data for the simulation."""
+ dgp_dict = make_confounded_irm_data(
+ n_obs=dgp_params["n_obs"],
+ theta=dgp_params["theta"],
+ gamma_a=dgp_params["gamma_a"],
+ beta_a=dgp_params["beta_a"],
+ var_epsilon_y=dgp_params["var_epsilon_y"],
+ trimming_threshold=dgp_params["trimming_threshold"],
+ linear=dgp_params["linear"],
+ )
+ x_cols = [f"X{i + 1}" for i in np.arange(dgp_dict["x"].shape[1])]
+ df = pd.DataFrame(
+ np.column_stack((dgp_dict["x"], dgp_dict["y"], dgp_dict["d"])),
+ columns=x_cols + ["y", "d"],
+ )
+ dml_data = dml.DoubleMLData(df, "y", "d")
+ return dml_data
diff --git a/monte-cover/src/montecover/irm/irm_cate.py b/monte-cover/src/montecover/irm/irm_cate.py
new file mode 100644
index 0000000..73d5b97
--- /dev/null
+++ b/monte-cover/src/montecover/irm/irm_cate.py
@@ -0,0 +1,158 @@
+from typing import Any, Dict, Optional
+
+import doubleml as dml
+import numpy as np
+import pandas as pd
+import patsy
+from doubleml.datasets import make_heterogeneous_data
+from sklearn.linear_model import LinearRegression
+
+from montecover.base import BaseSimulation
+from montecover.utils import create_learner_from_config
+
+
+class IRMCATECoverageSimulation(BaseSimulation):
+ """Simulation class for coverage properties of DoubleMLIRM for CATE estimation."""
+
+ def __init__(
+ self,
+ config_file: str,
+ suppress_warnings: bool = True,
+ log_level: str = "INFO",
+ log_file: Optional[str] = None,
+ ):
+ super().__init__(
+ config_file=config_file,
+ suppress_warnings=suppress_warnings,
+ log_level=log_level,
+ log_file=log_file,
+ )
+
+ # Calculate oracle values
+ self._calculate_oracle_values()
+
+ def _process_config_parameters(self):
+ """Process simulation-specific parameters from config"""
+ # Process ML models in parameter grid
+ assert "learners" in self.dml_parameters, "No learners specified in the config file"
+
+ required_learners = ["ml_g", "ml_m"]
+ for learner in self.dml_parameters["learners"]:
+ for ml in required_learners:
+ assert ml in learner, f"No {ml} specified in the config file"
+
+ def _calculate_oracle_values(self):
+ """Calculate oracle values for the simulation."""
+ # Oracle values
+ data_oracle = make_heterogeneous_data(
+ n_obs=int(1e6),
+ p=self.dgp_parameters["p"][0],
+ support_size=self.dgp_parameters["support_size"][0],
+ n_x=self.dgp_parameters["n_x"][0],
+ binary_treatment=True,
+ )
+
+ self.logger.info("Calculating oracle values")
+
+ design_matrix_oracle = patsy.dmatrix("bs(x, df=5, degree=2)", {"x": data_oracle["data"]["X_0"]})
+ spline_basis_oracle = pd.DataFrame(design_matrix_oracle)
+ oracle_model = LinearRegression()
+ oracle_model.fit(spline_basis_oracle, data_oracle["effects"])
+
+ # evaluate on grid
+ grid = {"x": np.linspace(0.1, 0.9, 100)}
+ spline_grid_oracle = pd.DataFrame(patsy.build_design_matrices([design_matrix_oracle.design_info], grid)[0])
+ oracle_cates = oracle_model.predict(spline_grid_oracle)
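+        # The oracle CATE curve is the projection of the true individual effects
+        # onto the same B-spline basis used in estimation, evaluated on a fixed X_0
+        # grid so that estimated and oracle targets are directly comparable.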
+
+ self.oracle_values = dict()
+ self.oracle_values["cates"] = oracle_cates
+ self.oracle_values["grid"] = grid
+
+ self.logger.info(f"Oracle values: {self.oracle_values}")
+
+ def run_single_rep(self, dml_data: dml.DoubleMLData, dml_params: Dict[str, Any]) -> Dict[str, Any]:
+ """Run a single repetition with the given parameters."""
+ # Extract parameters
+ learner_config = dml_params["learners"]
+ learner_g_name, ml_g = create_learner_from_config(learner_config["ml_g"])
+ learner_m_name, ml_m = create_learner_from_config(learner_config["ml_m"])
+
+ # Model
+ dml_model = dml.DoubleMLIRM(
+ obj_dml_data=dml_data,
+ ml_g=ml_g,
+ ml_m=ml_m,
+ )
+ dml_model.fit()
+
+ # cate
+ design_matrix = patsy.dmatrix("bs(x, df=5, degree=2)", {"x": dml_data.data["X_0"]})
+ spline_basis = pd.DataFrame(design_matrix)
+ cate_model = dml_model.cate(basis=spline_basis)
+
+ # evaluation spline basis
+ spline_grid = pd.DataFrame(patsy.build_design_matrices([design_matrix.design_info], self.oracle_values["grid"])[0])
+
+ result = {
+ "coverage": [],
+ }
+ for level in self.confidence_parameters["level"]:
+ level_result = dict()
+ confint = cate_model.confint(basis=spline_grid, level=level)
+ effects = confint["effect"]
+            uniform_confint = cate_model.confint(basis=spline_grid, level=level, joint=True, n_rep_boot=2000)
+ level_result["coverage"] = self._compute_coverage(
+ thetas=effects,
+ oracle_thetas=self.oracle_values["cates"],
+ confint=confint.iloc[:, [0, 2]],
+ joint_confint=uniform_confint.iloc[:, [0, 2]],
+ )
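+            # confint returns the columns [lower, effect, upper]; iloc[:, [0, 2]]
+            # keeps only the interval bounds expected by _compute_coverage.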
+
+ # add parameters to the result
+ for res_metric in level_result.values():
+ res_metric.update(
+ {
+ "Learner g": learner_g_name,
+ "Learner m": learner_m_name,
+ "level": level,
+ }
+ )
+ for key, res in level_result.items():
+ result[key].append(res)
+
+ return result
+
+ def summarize_results(self):
+ """Summarize the simulation results."""
+ self.logger.info("Summarizing simulation results")
+
+ # Group by parameter combinations
+ groupby_cols = ["Learner g", "Learner m", "level"]
+ aggregation_dict = {
+ "Coverage": "mean",
+ "CI Length": "mean",
+ "Bias": "mean",
+ "Uniform Coverage": "mean",
+ "Uniform CI Length": "mean",
+ "repetition": "count",
+ }
+
+ # Aggregate results (possibly multiple result dfs)
+ result_summary = dict()
+ for result_name, result_df in self.results.items():
+ result_summary[result_name] = result_df.groupby(groupby_cols).agg(aggregation_dict).reset_index()
+ self.logger.debug(f"Summarized {result_name} results")
+
+ return result_summary
+
+ def _generate_dml_data(self, dgp_params) -> dml.DoubleMLData:
+ """Generate data for the simulation."""
+ data = make_heterogeneous_data(
+ n_obs=dgp_params["n_obs"],
+ p=dgp_params["p"],
+ support_size=dgp_params["support_size"],
+ n_x=dgp_params["n_x"],
+ binary_treatment=True,
+ )
+ dml_data = dml.DoubleMLData(data["data"], "y", "d")
+ return dml_data
diff --git a/monte-cover/src/montecover/irm/irm_gate.py b/monte-cover/src/montecover/irm/irm_gate.py
new file mode 100644
index 0000000..64f72d3
--- /dev/null
+++ b/monte-cover/src/montecover/irm/irm_gate.py
@@ -0,0 +1,157 @@
+from typing import Any, Dict, Optional
+
+import doubleml as dml
+import numpy as np
+import pandas as pd
+from doubleml.datasets import make_heterogeneous_data
+
+from montecover.base import BaseSimulation
+from montecover.utils import create_learner_from_config
+
+
+class IRMGATECoverageSimulation(BaseSimulation):
+ """Simulation class for coverage properties of DoubleMLIRM for GATE estimation."""
+
+ def __init__(
+ self,
+ config_file: str,
+ suppress_warnings: bool = True,
+ log_level: str = "INFO",
+ log_file: Optional[str] = None,
+ ):
+ super().__init__(
+ config_file=config_file,
+ suppress_warnings=suppress_warnings,
+ log_level=log_level,
+ log_file=log_file,
+ )
+
+ # Calculate oracle values
+ self._calculate_oracle_values()
+
+ def _process_config_parameters(self):
+ """Process simulation-specific parameters from config"""
+ # Process ML models in parameter grid
+ assert "learners" in self.dml_parameters, "No learners specified in the config file"
+
+ required_learners = ["ml_g", "ml_m"]
+ for learner in self.dml_parameters["learners"]:
+ for ml in required_learners:
+ assert ml in learner, f"No {ml} specified in the config file"
+
+ def _generate_groups(self, data):
+ """Generate groups for the simulation."""
+ groups = pd.DataFrame(
+ np.column_stack(
+ (
+ data["X_0"] <= 0.3,
+ (data["X_0"] > 0.3) & (data["X_0"] <= 0.7),
+ data["X_0"] > 0.7,
+ )
+ ),
+ columns=["Group 1", "Group 2", "Group 3"],
+ )
+ return groups
+
+ def _calculate_oracle_values(self):
+ """Calculate oracle values for the simulation."""
+ # Oracle values
+ data_oracle = make_heterogeneous_data(
+ n_obs=int(1e6),
+ p=self.dgp_parameters["p"][0],
+ support_size=self.dgp_parameters["support_size"][0],
+ n_x=self.dgp_parameters["n_x"][0],
+ binary_treatment=True,
+ )
+
+ self.logger.info("Calculating oracle values")
+ groups = self._generate_groups(data_oracle["data"])
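+        # Oracle GATEs: mean of the true individual effects within each X_0 group,
+        # using the same partition that is later applied to each simulated dataset.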
+ oracle_gates = [data_oracle["effects"][groups[group]].mean() for group in groups.columns]
+
+ self.oracle_values = dict()
+ self.oracle_values["gates"] = oracle_gates
+
+ self.logger.info(f"Oracle values: {self.oracle_values}")
+
+ def run_single_rep(self, dml_data: dml.DoubleMLData, dml_params: Dict[str, Any]) -> Dict[str, Any]:
+ """Run a single repetition with the given parameters."""
+ # Extract parameters
+ learner_config = dml_params["learners"]
+ learner_g_name, ml_g = create_learner_from_config(learner_config["ml_g"])
+ learner_m_name, ml_m = create_learner_from_config(learner_config["ml_m"])
+
+ # Model
+ dml_model = dml.DoubleMLIRM(
+ obj_dml_data=dml_data,
+ ml_g=ml_g,
+ ml_m=ml_m,
+ )
+ dml_model.fit()
+
+ # gate
+ groups = self._generate_groups(dml_data.data)
+ gate_model = dml_model.gate(groups=groups)
+
+ result = {
+ "coverage": [],
+ }
+ for level in self.confidence_parameters["level"]:
+ level_result = dict()
+ confint = gate_model.confint(level=level)
+ effects = confint["effect"]
+            uniform_confint = gate_model.confint(level=level, joint=True, n_rep_boot=2000)
+ level_result["coverage"] = self._compute_coverage(
+ thetas=effects,
+ oracle_thetas=self.oracle_values["gates"],
+ confint=confint.iloc[:, [0, 2]],
+ joint_confint=uniform_confint.iloc[:, [0, 2]],
+ )
+
+ # add parameters to the result
+ for res_metric in level_result.values():
+ res_metric.update(
+ {
+ "Learner g": learner_g_name,
+ "Learner m": learner_m_name,
+ "level": level,
+ }
+ )
+ for key, res in level_result.items():
+ result[key].append(res)
+
+ return result
+
+ def summarize_results(self):
+ """Summarize the simulation results."""
+ self.logger.info("Summarizing simulation results")
+
+ # Group by parameter combinations
+ groupby_cols = ["Learner g", "Learner m", "level"]
+ aggregation_dict = {
+ "Coverage": "mean",
+ "CI Length": "mean",
+ "Bias": "mean",
+ "Uniform Coverage": "mean",
+ "Uniform CI Length": "mean",
+ "repetition": "count",
+ }
+
+ # Aggregate results (possibly multiple result dfs)
+ result_summary = dict()
+ for result_name, result_df in self.results.items():
+ result_summary[result_name] = result_df.groupby(groupby_cols).agg(aggregation_dict).reset_index()
+ self.logger.debug(f"Summarized {result_name} results")
+
+ return result_summary
+
+ def _generate_dml_data(self, dgp_params) -> dml.DoubleMLData:
+ """Generate data for the simulation."""
+ data = make_heterogeneous_data(
+ n_obs=dgp_params["n_obs"],
+ p=dgp_params["p"],
+ support_size=dgp_params["support_size"],
+ n_x=dgp_params["n_x"],
+ binary_treatment=True,
+ )
+ dml_data = dml.DoubleMLData(data["data"], "y", "d")
+ return dml_data
diff --git a/monte-cover/src/montecover/irm/lpq.py b/monte-cover/src/montecover/irm/lpq.py
new file mode 100644
index 0000000..86b66f3
--- /dev/null
+++ b/monte-cover/src/montecover/irm/lpq.py
@@ -0,0 +1,233 @@
+from typing import Any, Dict, Optional
+
+import doubleml as dml
+import numpy as np
+import pandas as pd
+
+from montecover.base import BaseSimulation
+from montecover.utils import create_learner_from_config
+
+
+# define loc-scale model
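+# X_conf is an unobserved confounder entering location, scale and treatment; Z acts as a binary instrument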
+def f_loc(D, X, X_conf):
+ loc = 0.5 * D + 2 * D * X[:, 4] + 2.0 * (X[:, 1] > 0.1) - 1.7 * (X[:, 0] * X[:, 2] > 0) - 3 * X[:, 3] - 2 * X_conf[:, 0]
+ return loc
+
+
+def f_scale(D, X, X_conf):
+ scale = np.sqrt(0.5 * D + 3 * D * X[:, 0] + 0.4 * X_conf[:, 0] + 2)
+ return scale
+
+
+def generate_treatment(Z, X, X_conf):
+ eta = np.random.normal(size=len(Z))
+ d = ((0.5 * Z - 0.3 * X[:, 0] + 0.7 * X_conf[:, 0] + eta) > 0) * 1.0
+ return d
+
+
+def dgp(n=200, p=5):
+ X = np.random.uniform(0, 1, size=[n, p])
+ X_conf = np.random.uniform(-1, 1, size=[n, 1])
+ Z = np.random.binomial(1, p=0.5, size=n)
+ D = generate_treatment(Z, X, X_conf)
+ epsilon = np.random.normal(size=n)
+
+ Y = f_loc(D, X, X_conf) + f_scale(D, X, X_conf) * epsilon
+
+ return Y, X, D, Z
+
+
+class LPQCoverageSimulation(BaseSimulation):
+ """Simulation class for coverage properties of DoubleMLQTE for local potential quantile estimation."""
+
+ def __init__(
+ self,
+ config_file: str,
+ suppress_warnings: bool = True,
+ log_level: str = "INFO",
+ log_file: Optional[str] = None,
+ ):
+ super().__init__(
+ config_file=config_file,
+ suppress_warnings=suppress_warnings,
+ log_level=log_level,
+ log_file=log_file,
+ )
+
+ # Calculate oracle values
+ self._calculate_oracle_values()
+
+ def _process_config_parameters(self):
+ """Process simulation-specific parameters from config"""
+ # Process ML models in parameter grid
+ assert "learners" in self.dml_parameters, "No learners specified in the config file"
+
+ required_learners = ["ml_g", "ml_m"]
+ for learner in self.dml_parameters["learners"]:
+ for ml in required_learners:
+ assert ml in learner, f"No {ml} specified in the config file"
+
+ def _calculate_oracle_values(self):
+ """Calculate oracle values for the simulation."""
+ self.logger.info("Calculating oracle values")
+
+ # Parameters
+ n_true = int(10e6)
+ tau_vec = self.dml_parameters["tau_vec"][0]
+ p = self.dgp_parameters["dim_x"][0]
+
+ X_true = np.random.uniform(0, 1, size=[n_true, p])
+ X_conf_true = np.random.uniform(-1, 1, size=[n_true, 1])
+ Z_true = np.random.binomial(1, p=0.5, size=n_true)
+ D1_true = generate_treatment(np.ones_like(Z_true), X_true, X_conf_true)
+ D0_true = generate_treatment(np.zeros_like(Z_true), X_true, X_conf_true)
+ epsilon_true = np.random.normal(size=n_true)
+
+ compliers = (D1_true == 1) * (D0_true == 0)
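+ # compliers: treated when Z=1 but untreated when Z=0; the LPQ/LQTE oracle values refer to this subpopulation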
+ self.logger.info(f"Compliance probability: {str(compliers.mean())}")
+ n_compliers = compliers.sum()
+ Y1 = (
+ f_loc(np.ones(n_compliers), X_true[compliers, :], X_conf_true[compliers, :])
+ + f_scale(np.ones(n_compliers), X_true[compliers, :], X_conf_true[compliers, :]) * epsilon_true[compliers]
+ )
+ Y0 = (
+ f_loc(np.zeros(n_compliers), X_true[compliers, :], X_conf_true[compliers, :])
+ + f_scale(np.zeros(n_compliers), X_true[compliers, :], X_conf_true[compliers, :]) * epsilon_true[compliers]
+ )
+
+ Y0_quant = np.quantile(Y0, q=tau_vec)
+ Y1_quant = np.quantile(Y1, q=tau_vec)
+ effect_quant = Y1_quant - Y0_quant
+
+ self.oracle_values = dict()
+ self.oracle_values["Y0_quant"] = Y0_quant
+ self.oracle_values["Y1_quant"] = Y1_quant
+ self.oracle_values["effect_quant"] = effect_quant
+
+ self.logger.info(f"Oracle values: {self.oracle_values}")
+
+ def run_single_rep(self, dml_data: dml.DoubleMLData, dml_params: Dict[str, Any]) -> Dict[str, Any]:
+ """Run a single repetition with the given parameters."""
+ # Extract parameters
+ learner_config = dml_params["learners"]
+ learner_g_name, ml_g = create_learner_from_config(learner_config["ml_g"])
+ learner_m_name, ml_m = create_learner_from_config(learner_config["ml_m"])
+ tau_vec = dml_params["tau_vec"]
+ trimming_threshold = dml_params["trimming_threshold"]
+ Y0_quant = self.oracle_values["Y0_quant"]
+ Y1_quant = self.oracle_values["Y1_quant"]
+ effect_quant = self.oracle_values["effect_quant"]
+
+ # Model
+ dml_model = dml.DoubleMLQTE(
+ obj_dml_data=dml_data,
+ ml_g=ml_g,
+ ml_m=ml_m,
+ score="LPQ",
+ quantiles=tau_vec,
+ trimming_threshold=trimming_threshold,
+ )
+ dml_model.fit()
+ dml_model.bootstrap(n_rep_boot=2000)
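+ # bootstrapping is required before requesting joint confidence intervals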
+
+ result = {
+ "Y0_coverage": [],
+ "Y1_coverage": [],
+ "effect_coverage": [],
+ }
+ for level in self.confidence_parameters["level"]:
+ level_result = dict()
+ level_result["effect_coverage"] = self._compute_coverage(
+ thetas=dml_model.coef,
+ oracle_thetas=effect_quant,
+ confint=dml_model.confint(level=level),
+ joint_confint=dml_model.confint(level=level, joint=True),
+ )
+
+ Y0_estimates = np.full(len(tau_vec), np.nan)
+ Y1_estimates = np.full(len(tau_vec), np.nan)
+
+ Y0_confint = np.full((len(tau_vec), 2), np.nan)
+ Y1_confint = np.full((len(tau_vec), 2), np.nan)
+
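+ # per-quantile potential outcome models fitted internally by DoubleMLQTE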
+ for tau_idx in range(len(tau_vec)):
+ model_Y0 = dml_model.modellist_0[tau_idx]
+ model_Y1 = dml_model.modellist_1[tau_idx]
+
+ Y0_estimates[tau_idx] = model_Y0.coef
+ Y1_estimates[tau_idx] = model_Y1.coef
+
+ Y0_confint[tau_idx, :] = model_Y0.confint(level=level)
+ Y1_confint[tau_idx, :] = model_Y1.confint(level=level)
+
+ Y0_confint_df = pd.DataFrame(Y0_confint, columns=["lower", "upper"])
+ Y1_confint_df = pd.DataFrame(Y1_confint, columns=["lower", "upper"])
+
+ level_result["Y0_coverage"] = self._compute_coverage(
+ thetas=Y0_estimates,
+ oracle_thetas=Y0_quant,
+ confint=Y0_confint_df,
+ joint_confint=None,
+ )
+
+ level_result["Y1_coverage"] = self._compute_coverage(
+ thetas=Y1_estimates,
+ oracle_thetas=Y1_quant,
+ confint=Y1_confint_df,
+ joint_confint=None,
+ )
+
+ # add parameters to the result
+ for res_metric in level_result.values():
+ res_metric.update(
+ {
+ "Learner g": learner_g_name,
+ "Learner m": learner_m_name,
+ "level": level,
+ }
+ )
+ for key, res in level_result.items():
+ result[key].append(res)
+
+ return result
+
+ def summarize_results(self):
+ """Summarize the simulation results."""
+ self.logger.info("Summarizing simulation results")
+
+ # Group by parameter combinations
+ groupby_cols = ["Learner g", "Learner m", "level"]
+ aggregation_dict = {
+ "Coverage": "mean",
+ "CI Length": "mean",
+ "Bias": "mean",
+ "repetition": "count",
+ }
+
+ result_summary = dict()
+ # Aggregate results for Y0 and Y1
+ for result_name in ["Y0_coverage", "Y1_coverage"]:
+ df = self.results[result_name]
+ result_summary[result_name] = df.groupby(groupby_cols).agg(aggregation_dict).reset_index()
+ self.logger.debug(f"Summarized {result_name} results")
+
+ uniform_aggregation_dict = {
+ "Coverage": "mean",
+ "CI Length": "mean",
+ "Bias": "mean",
+ "Uniform Coverage": "mean",
+ "Uniform CI Length": "mean",
+ "repetition": "count",
+ }
+ result_summary["effect_coverage"] = (
+ self.results["effect_coverage"].groupby(groupby_cols).agg(uniform_aggregation_dict).reset_index()
+ )
+ self.logger.debug("Summarized effect_coverage results")
+
+ return result_summary
+
+ def _generate_dml_data(self, dgp_params: Dict[str, Any]) -> dml.DoubleMLData:
+ """Generate data for the simulation."""
+ Y, X, D, Z = dgp(n=dgp_params["n_obs"], p=dgp_params["dim_x"])
+ dml_data = dml.DoubleMLData.from_arrays(X, Y, D, Z)
+ return dml_data
diff --git a/monte-cover/src/montecover/irm/pq.py b/monte-cover/src/montecover/irm/pq.py
new file mode 100644
index 0000000..f935dc3
--- /dev/null
+++ b/monte-cover/src/montecover/irm/pq.py
@@ -0,0 +1,213 @@
+from typing import Any, Dict, Optional
+
+import doubleml as dml
+import numpy as np
+import pandas as pd
+
+from montecover.base import BaseSimulation
+from montecover.utils import create_learner_from_config
+
+
+# define loc-scale model
+def f_loc(D, X):
+ loc = 0.5 * D + 2 * D * X[:, 4] + 2.0 * (X[:, 1] > 0.1) - 1.7 * (X[:, 0] * X[:, 2] > 0) - 3 * X[:, 3]
+ return loc
+
+
+def f_scale(D, X):
+ scale = np.sqrt(0.5 * D + 0.3 * D * X[:, 1] + 2)
+ return scale
+
+
+def dgp(n=200, p=5):
+ X = np.random.uniform(-1, 1, size=[n, p])
+ D = ((X[:, 1] - X[:, 3] + 1.5 * (X[:, 0] > 0) + np.random.normal(size=n)) > 0) * 1.0
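+ # treatment depends on observables only, so potential quantiles are identified without an instrument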
+ epsilon = np.random.normal(size=n)
+
+ Y = f_loc(D, X) + f_scale(D, X) * epsilon
+ return Y, X, D, epsilon
+
+
+class PQCoverageSimulation(BaseSimulation):
+ """Simulation class for coverage properties of DoubleMLPQ for potential quantile estimation."""
+
+ def __init__(
+ self,
+ config_file: str,
+ suppress_warnings: bool = True,
+ log_level: str = "INFO",
+ log_file: Optional[str] = None,
+ ):
+ super().__init__(
+ config_file=config_file,
+ suppress_warnings=suppress_warnings,
+ log_level=log_level,
+ log_file=log_file,
+ )
+
+ # Calculate oracle values
+ self._calculate_oracle_values()
+
+ def _process_config_parameters(self):
+ """Process simulation-specific parameters from config"""
+ # Process ML models in parameter grid
+ assert "learners" in self.dml_parameters, "No learners specified in the config file"
+
+ required_learners = ["ml_g", "ml_m"]
+ for learner in self.dml_parameters["learners"]:
+ for ml in required_learners:
+ assert ml in learner, f"No {ml} specified in the config file"
+
+ def _calculate_oracle_values(self):
+ """Calculate oracle values for the simulation."""
+ self.logger.info("Calculating oracle values")
+
+ # Parameters
+ n_true = int(10e6)
+ tau_vec = self.dml_parameters["tau_vec"][0]
+ p = self.dgp_parameters["dim_x"][0]
+
+ _, X_true, _, epsilon_true = dgp(n=n_true, p=p)
+ D1 = np.ones(n_true)
+ D0 = np.zeros(n_true)
+
+ Y1 = f_loc(D1, X_true) + f_scale(D1, X_true) * epsilon_true
+ Y0 = f_loc(D0, X_true) + f_scale(D0, X_true) * epsilon_true
+
+ Y1_quant = np.quantile(Y1, q=tau_vec)
+ Y0_quant = np.quantile(Y0, q=tau_vec)
+ effect_quant = Y1_quant - Y0_quant
+
+ self.oracle_values = dict()
+ self.oracle_values["Y0_quant"] = Y0_quant
+ self.oracle_values["Y1_quant"] = Y1_quant
+ self.oracle_values["effect_quant"] = effect_quant
+
+ self.logger.info(f"Oracle values: {self.oracle_values}")
+
+ def run_single_rep(self, dml_data: dml.DoubleMLData, dml_params: Dict[str, Any]) -> Dict[str, Any]:
+ """Run a single repetition with the given parameters."""
+ # Extract parameters
+ learner_config = dml_params["learners"]
+ learner_g_name, ml_g = create_learner_from_config(learner_config["ml_g"])
+ learner_m_name, ml_m = create_learner_from_config(learner_config["ml_m"])
+ tau_vec = dml_params["tau_vec"]
+ trimming_threshold = dml_params["trimming_threshold"]
+ Y0_quant = self.oracle_values["Y0_quant"]
+ Y1_quant = self.oracle_values["Y1_quant"]
+ effect_quant = self.oracle_values["effect_quant"]
+
+ # Model
+ dml_model = dml.DoubleMLQTE(
+ obj_dml_data=dml_data,
+ ml_g=ml_g,
+ ml_m=ml_m,
+ score="PQ",
+ quantiles=tau_vec,
+ trimming_threshold=trimming_threshold,
+ )
+ dml_model.fit()
+ dml_model.bootstrap(n_rep_boot=2000)
+
+ result = {
+ "Y0_coverage": [],
+ "Y1_coverage": [],
+ "effect_coverage": [],
+ }
+ for level in self.confidence_parameters["level"]:
+ level_result = dict()
+ level_result["effect_coverage"] = self._compute_coverage(
+ thetas=dml_model.coef,
+ oracle_thetas=effect_quant,
+ confint=dml_model.confint(level=level),
+ joint_confint=dml_model.confint(level=level, joint=True),
+ )
+
+ Y0_estimates = np.full(len(tau_vec), np.nan)
+ Y1_estimates = np.full(len(tau_vec), np.nan)
+
+ Y0_confint = np.full((len(tau_vec), 2), np.nan)
+ Y1_confint = np.full((len(tau_vec), 2), np.nan)
+
+ for tau_idx in range(len(tau_vec)):
+ model_Y0 = dml_model.modellist_0[tau_idx]
+ model_Y1 = dml_model.modellist_1[tau_idx]
+
+ Y0_estimates[tau_idx] = model_Y0.coef
+ Y1_estimates[tau_idx] = model_Y1.coef
+
+ Y0_confint[tau_idx, :] = model_Y0.confint(level=level)
+ Y1_confint[tau_idx, :] = model_Y1.confint(level=level)
+
+ Y0_confint_df = pd.DataFrame(Y0_confint, columns=["lower", "upper"])
+ Y1_confint_df = pd.DataFrame(Y1_confint, columns=["lower", "upper"])
+
+ level_result["Y0_coverage"] = self._compute_coverage(
+ thetas=Y0_estimates,
+ oracle_thetas=Y0_quant,
+ confint=Y0_confint_df,
+ joint_confint=None,
+ )
+
+ level_result["Y1_coverage"] = self._compute_coverage(
+ thetas=Y1_estimates,
+ oracle_thetas=Y1_quant,
+ confint=Y1_confint_df,
+ joint_confint=None,
+ )
+
+ # add parameters to the result
+ for res_metric in level_result.values():
+ res_metric.update(
+ {
+ "Learner g": learner_g_name,
+ "Learner m": learner_m_name,
+ "level": level,
+ }
+ )
+ for key, res in level_result.items():
+ result[key].append(res)
+
+ return result
+
+ def summarize_results(self):
+ """Summarize the simulation results."""
+ self.logger.info("Summarizing simulation results")
+
+ # Group by parameter combinations
+ groupby_cols = ["Learner g", "Learner m", "level"]
+ aggregation_dict = {
+ "Coverage": "mean",
+ "CI Length": "mean",
+ "Bias": "mean",
+ "repetition": "count",
+ }
+
+ result_summary = dict()
+ # Aggregate results for Y0 and Y1
+ for result_name in ["Y0_coverage", "Y1_coverage"]:
+ df = self.results[result_name]
+ result_summary[result_name] = df.groupby(groupby_cols).agg(aggregation_dict).reset_index()
+ self.logger.debug(f"Summarized {result_name} results")
+
+ uniform_aggregation_dict = {
+ "Coverage": "mean",
+ "CI Length": "mean",
+ "Bias": "mean",
+ "Uniform Coverage": "mean",
+ "Uniform CI Length": "mean",
+ "repetition": "count",
+ }
+ result_summary["effect_coverage"] = (
+ self.results["effect_coverage"].groupby(groupby_cols).agg(uniform_aggregation_dict).reset_index()
+ )
+ self.logger.debug("Summarized effect_coverage results")
+
+ return result_summary
+
+ def _generate_dml_data(self, dgp_params: Dict[str, Any]) -> dml.DoubleMLData:
+ """Generate data for the simulation."""
+ Y, X, D, _ = dgp(n=dgp_params["n_obs"], p=dgp_params["dim_x"])
+ dml_data = dml.DoubleMLData.from_arrays(X, Y, D)
+ return dml_data
diff --git a/monte-cover/src/montecover/plm/__init__.py b/monte-cover/src/montecover/plm/__init__.py
index f861d2b..167b36d 100644
--- a/monte-cover/src/montecover/plm/__init__.py
+++ b/monte-cover/src/montecover/plm/__init__.py
@@ -1,5 +1,15 @@
"""Monte Carlo coverage simulations for PLM."""

+from montecover.plm.pliv_late import PLIVLATECoverageSimulation
from montecover.plm.plr_ate import PLRATECoverageSimulation
+from montecover.plm.plr_ate_sensitivity import PLRATESensitivityCoverageSimulation
+from montecover.plm.plr_cate import PLRCATECoverageSimulation
+from montecover.plm.plr_gate import PLRGATECoverageSimulation

-__all__ = ["PLRATECoverageSimulation"]
+__all__ = [
+ "PLRATECoverageSimulation",
+ "PLIVLATECoverageSimulation",
+ "PLRGATECoverageSimulation",
+ "PLRCATECoverageSimulation",
+ "PLRATESensitivityCoverageSimulation",
+]
diff --git a/monte-cover/src/montecover/plm/pliv_late.py b/monte-cover/src/montecover/plm/pliv_late.py
new file mode 100644
index 0000000..862772a
--- /dev/null
+++ b/monte-cover/src/montecover/plm/pliv_late.py
@@ -0,0 +1,128 @@
+from typing import Any, Dict, Optional
+
+import doubleml as dml
+from doubleml.datasets import make_pliv_CHS2015
+
+from montecover.base import BaseSimulation
+from montecover.utils import create_learner_from_config
+
+
+class PLIVLATECoverageSimulation(BaseSimulation):
+ """Simulation class for PLIV LATE coverage."""
+
+ def __init__(
+ self,
+ config_file: str,
+ suppress_warnings: bool = True,
+ log_level: str = "INFO",
+ log_file: Optional[str] = None,
+ ):
+ super().__init__(
+ config_file=config_file,
+ suppress_warnings=suppress_warnings,
+ log_level=log_level,
+ log_file=log_file,
+ )
+
+ # Calculate oracle values
+ self._calculate_oracle_values()
+
+ def _process_config_parameters(self):
+ """Process simulation-specific parameters from config"""
+ # Process ML models in parameter grid
+ assert "learners" in self.dml_parameters, "No learners specified in the config file"
+
+ required_learners = ["ml_g", "ml_m", "ml_r"]
+ for learner in self.dml_parameters["learners"]:
+ for ml in required_learners:
+ assert ml in learner, f"No {ml} specified in the config file"
+
+ def _calculate_oracle_values(self):
+ """Calculate oracle values for the simulation."""
+ self.logger.info("Calculating oracle values")
+
+ self.oracle_values = dict()
+ self.oracle_values["theta"] = self.dgp_parameters["theta"]
+
+ def run_single_rep(self, dml_data, dml_params) -> Dict[str, Any]:
+ """Run a single repetition with the given parameters."""
+
+ # Extract parameters
+ learner_config = dml_params["learners"]
+ learner_g_name, ml_g = create_learner_from_config(learner_config["ml_g"])
+ learner_m_name, ml_m = create_learner_from_config(learner_config["ml_m"])
+ learner_r_name, ml_r = create_learner_from_config(learner_config["ml_r"])
+ score = dml_params["score"]
+
+ # Model
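+ # ml_g is required only for the IV-type score; the partialling-out score uses ml_l, ml_m and ml_r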
+ dml_model = dml.DoubleMLPLIV(
+ obj_dml_data=dml_data,
+ ml_l=ml_g,
+ ml_m=ml_m,
+ ml_g=ml_g if score == "IV-type" else None,
+ ml_r=ml_r,
+ score=score,
+ )
+ dml_model.fit()
+
+ result = {
+ "coverage": [],
+ }
+ for level in self.confidence_parameters["level"]:
+ level_result = dict()
+ level_result["coverage"] = self._compute_coverage(
+ thetas=dml_model.coef,
+ oracle_thetas=self.oracle_values["theta"],
+ confint=dml_model.confint(level=level),
+ joint_confint=None,
+ )
+
+ # add parameters to the result
+ for res in level_result.values():
+ res.update(
+ {
+ "Learner g": learner_g_name,
+ "Learner m": learner_m_name,
+ "Learner r": learner_r_name,
+ "Score": score,
+ "level": level,
+ }
+ )
+ for key, res in level_result.items():
+ result[key].append(res)
+
+ return result
+
+ def summarize_results(self):
+ """Summarize the simulation results."""
+ self.logger.info("Summarizing simulation results")
+
+ # Group by parameter combinations
+ groupby_cols = ["Learner g", "Learner m", "Learner r", "Score", "level"]
+ aggregation_dict = {
+ "Coverage": "mean",
+ "CI Length": "mean",
+ "Bias": "mean",
+ "repetition": "count",
+ }
+
+ # Aggregate results (possibly multiple result dfs)
+ result_summary = dict()
+ for result_name, result_df in self.results.items():
+ result_summary[result_name] = result_df.groupby(groupby_cols).agg(aggregation_dict).reset_index()
+ self.logger.debug(f"Summarized {result_name} results")
+
+ return result_summary
+
+ def _generate_dml_data(self, dgp_params) -> dml.DoubleMLData:
+ """Generate data for the simulation."""
+ data = make_pliv_CHS2015(
+ alpha=dgp_params["theta"],
+ n_obs=dgp_params["n_obs"],
+ dim_x=dgp_params["dim_x"],
+ dim_z=dgp_params["dim_z"],
+ return_type="DataFrame",
+ )
+ dml_data = dml.DoubleMLData(data, "y", "d", z_cols="Z1")
+ return dml_data
diff --git a/monte-cover/src/montecover/plm/plr_ate.py b/monte-cover/src/montecover/plm/plr_ate.py
index ac7f1c3..cdd3376 100644
--- a/monte-cover/src/montecover/plm/plr_ate.py
+++ b/monte-cover/src/montecover/plm/plr_ate.py
@@ -2,15 +2,13 @@
import doubleml as dml
from doubleml.datasets import make_plr_CCDDHNR2018
-from lightgbm import LGBMRegressor
-from sklearn.ensemble import RandomForestRegressor
-from sklearn.linear_model import LassoCV
from montecover.base import BaseSimulation
+from montecover.utils import create_learner_from_config
class PLRATECoverageSimulation(BaseSimulation):
- """Simulation study for coverage properties of DoubleMLPLR for ATE estimation."""
+ """Simulation class for coverage properties of DoubleMLPLR for ATE estimation."""
def __init__(
self,
@@ -26,9 +24,6 @@ def __init__(
log_file=log_file,
)
- # Additional results storage for aggregated results
- self.results_aggregated = []
-
# Calculate oracle values
self._calculate_oracle_values()
@@ -36,26 +31,11 @@ def _process_config_parameters(self):
"""Process simulation-specific parameters from config"""
# Process ML models in parameter grid
assert "learners" in self.dml_parameters, "No learners specified in the config file"
+
+ required_learners = ["ml_g", "ml_m"]
for learner in self.dml_parameters["learners"]:
- assert "ml_g" in learner, "No ml_g specified in the config file"
- assert "ml_m" in learner, "No ml_m specified in the config file"
-
- # Convert ml_g strings to actual objects
- learner["ml_g"] = self._convert_ml_string_to_object(learner["ml_g"][0])
- learner["ml_m"] = self._convert_ml_string_to_object(learner["ml_m"][0])
-
- def _convert_ml_string_to_object(self, ml_string):
- """Convert a string to a machine learning object."""
- if ml_string == "Lasso":
- learner = LassoCV()
- elif ml_string == "Random Forest":
- learner = RandomForestRegressor(n_estimators=200, max_features=10, max_depth=5, min_samples_leaf=20)
- elif ml_string == "LGBM":
- learner = LGBMRegressor(n_estimators=500, learning_rate=0.01, verbose=-1, n_jobs=1)
- else:
- raise ValueError(f"Unknown learner type: {ml_string}")
-
- return (ml_string, learner)
+ for ml in required_learners:
+ assert ml in learner, f"No {ml} specified in the config file"
def _calculate_oracle_values(self):
"""Calculate oracle values for the simulation."""
@@ -67,8 +47,9 @@ def _calculate_oracle_values(self):
def run_single_rep(self, dml_data, dml_params) -> Dict[str, Any]:
"""Run a single repetition with the given parameters."""
# Extract parameters
- learner_g_name, ml_g = dml_params["learners"]["ml_g"]
- learner_m_name, ml_m = dml_params["learners"]["ml_m"]
+ learner_config = dml_params["learners"]
+ learner_g_name, ml_g = create_learner_from_config(learner_config["ml_g"])
+ learner_m_name, ml_m = create_learner_from_config(learner_config["ml_m"])
score = dml_params["score"]
# Model
@@ -132,7 +113,10 @@ def summarize_results(self):
def _generate_dml_data(self, dgp_params) -> dml.DoubleMLData:
"""Generate data for the simulation."""
data = make_plr_CCDDHNR2018(
- alpha=dgp_params["theta"], n_obs=dgp_params["n_obs"], dim_x=dgp_params["dim_x"], return_type="DataFrame"
+ alpha=dgp_params["theta"],
+ n_obs=dgp_params["n_obs"],
+ dim_x=dgp_params["dim_x"],
+ return_type="DataFrame",
)
dml_data = dml.DoubleMLData(data, "y", "d")
return dml_data
diff --git a/monte-cover/src/montecover/plm/plr_ate_sensitivity.py b/monte-cover/src/montecover/plm/plr_ate_sensitivity.py
new file mode 100644
index 0000000..69a33f3
--- /dev/null
+++ b/monte-cover/src/montecover/plm/plr_ate_sensitivity.py
@@ -0,0 +1,183 @@
+from typing import Any, Dict, Optional
+
+import doubleml as dml
+import numpy as np
+import pandas as pd
+from doubleml.datasets import make_confounded_plr_data
+
+from montecover.base import BaseSimulation
+from montecover.utils import create_learner_from_config
+
+
+class PLRATESensitivityCoverageSimulation(BaseSimulation):
+ """Simulation class for sensitivity properties of DoubleMLPLR for ATE estimation."""
+
+ def __init__(
+ self,
+ config_file: str,
+ suppress_warnings: bool = True,
+ log_level: str = "INFO",
+ log_file: Optional[str] = None,
+ ):
+ super().__init__(
+ config_file=config_file,
+ suppress_warnings=suppress_warnings,
+ log_level=log_level,
+ log_file=log_file,
+ )
+
+ # Calculate oracle values
+ self._calculate_oracle_values()
+
+ def _process_config_parameters(self):
+ """Process simulation-specific parameters from config"""
+ # Process ML models in parameter grid
+ assert "learners" in self.dml_parameters, "No learners specified in the config file"
+
+ required_learners = ["ml_g", "ml_m"]
+ for learner in self.dml_parameters["learners"]:
+ for ml in required_learners:
+ assert ml in learner, f"No {ml} specified in the config file"
+
+ def _calculate_oracle_values(self):
+ """Calculate oracle values for the simulation."""
+ self.logger.info("Calculating oracle values")
+
+ # hardcoded parameters for omitted confounders
+ cf_y = 0.1
+ cf_d = 0.1
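+ # cf_y / cf_d parameterize the strength of unobserved confounding in the outcome and treatment equations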
+
+ np.random.seed(42)
+ dgp_dict = make_confounded_plr_data(n_obs=int(1e6), cf_y=cf_y, cf_d=cf_d, theta=self.dgp_parameters["theta"])
+ oracle_dict = dgp_dict["oracle_values"]
+ cf_y_test = np.mean(np.square(oracle_dict["g_long"] - oracle_dict["g_short"])) / np.mean(
+ np.square(dgp_dict["y"] - oracle_dict["g_short"])
+ )
+ self.logger.info(f"Input cf_y:{cf_y} \nCalculated cf_y: {round(cf_y_test, 5)}")
+
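+ # long vs. short Riesz representers; their relative gap recovers cf_d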
+ rr_long = (dgp_dict["d"] - oracle_dict["m_long"]) / np.mean(np.square(dgp_dict["d"] - oracle_dict["m_long"]))
+ rr_short = (dgp_dict["d"] - oracle_dict["m_short"]) / np.mean(np.square(dgp_dict["d"] - oracle_dict["m_short"]))
+ C2_D = (np.mean(np.square(rr_long)) - np.mean(np.square(rr_short))) / np.mean(np.square(rr_short))
+ cf_d_test = C2_D / (1 + C2_D)
+ self.logger.info(f"Input cf_d:{cf_d}\nCalculated cf_d: {round(cf_d_test, 5)}")
+
+ # compute the value for rho
+ rho = np.corrcoef((oracle_dict["g_long"] - oracle_dict["g_short"]), (rr_long - rr_short))[0, 1]
+ self.logger.info(f"Correlation rho: {round(rho, 5)}")
+
+ self.oracle_values = {
+ "theta": self.dgp_parameters["theta"],
+ "cf_y": cf_y,
+ "cf_d": cf_d,
+ "rho": rho,
+ }
+
+ def run_single_rep(self, dml_data, dml_params) -> Dict[str, Any]:
+ """Run a single repetition with the given parameters."""
+ # Extract parameters
+ learner_config = dml_params["learners"]
+ learner_g_name, ml_g = create_learner_from_config(learner_config["ml_g"])
+ learner_m_name, ml_m = create_learner_from_config(learner_config["ml_m"])
+ score = dml_params["score"]
+ theta = self.oracle_values["theta"][0]
+
+ # Model
+ dml_model = dml.DoubleMLPLR(
+ obj_dml_data=dml_data,
+ ml_l=ml_g,
+ ml_m=ml_m,
+ ml_g=ml_g if score == "IV-type" else None,
+ score=score,
+ )
+ dml_model.fit()
+
+ result = {
+ "coverage": [],
+ }
+ for level in self.confidence_parameters["level"]:
+ level_result = dict()
+ level_result["coverage"] = self._compute_coverage(
+ thetas=dml_model.coef,
+ oracle_thetas=theta,
+ confint=dml_model.confint(level=level),
+ joint_confint=None,
+ )
+
+ # sensitivity analysis
+ dml_model.sensitivity_analysis(
+ cf_y=self.oracle_values["cf_y"],
+ cf_d=self.oracle_values["cf_d"],
+ rho=self.oracle_values["rho"],
+ level=level,
+ null_hypothesis=theta,
+ )
+ sensitivity_results = {
+ "Coverage (Lower)": theta >= dml_model.sensitivity_params["ci"]["lower"][0],
+ "Coverage (Upper)": theta <= dml_model.sensitivity_params["ci"]["upper"][0],
+ "RV": dml_model.sensitivity_params["rv"][0],
+ "RVa": dml_model.sensitivity_params["rva"][0],
+ "Bias (Lower)": abs(theta - dml_model.sensitivity_params["theta"]["lower"][0]),
+ "Bias (Upper)": abs(theta - dml_model.sensitivity_params["theta"]["upper"][0]),
+ }
+ # add sensitivity results to the level result coverage
+ level_result["coverage"].update(sensitivity_results)
+
+ # add parameters to the result
+ for res in level_result.values():
+ res.update(
+ {
+ "Learner g": learner_g_name,
+ "Learner m": learner_m_name,
+ "Score": score,
+ "level": level,
+ }
+ )
+ for key, res in level_result.items():
+ result[key].append(res)
+
+ return result
+
+ def summarize_results(self):
+ """Summarize the simulation results."""
+ self.logger.info("Summarizing simulation results")
+
+ # Group by parameter combinations
+ groupby_cols = ["Learner g", "Learner m", "Score", "level"]
+ aggregation_dict = {
+ "Coverage": "mean",
+ "CI Length": "mean",
+ "Bias": "mean",
+ "Coverage (Lower)": "mean",
+ "Coverage (Upper)": "mean",
+ "RV": "mean",
+ "RVa": "mean",
+ "Bias (Lower)": "mean",
+ "Bias (Upper)": "mean",
+ "repetition": "count",
+ }
+
+ # Aggregate results (possibly multiple result dfs)
+ result_summary = dict()
+ for result_name, result_df in self.results.items():
+ result_summary[result_name] = result_df.groupby(groupby_cols).agg(aggregation_dict).reset_index()
+ self.logger.debug(f"Summarized {result_name} results")
+
+ return result_summary
+
+ def _generate_dml_data(self, dgp_params) -> dml.DoubleMLData:
+ """Generate data for the simulation."""
+ dgp_dict = make_confounded_plr_data(
+ n_obs=dgp_params["n_obs"],
+ cf_y=self.oracle_values["cf_y"],
+ cf_d=self.oracle_values["cf_d"],
+ theta=dgp_params["theta"],
+ )
+ x_cols = [f"X{i + 1}" for i in np.arange(dgp_dict["x"].shape[1])]
+ df = pd.DataFrame(
+ np.column_stack((dgp_dict["x"], dgp_dict["y"], dgp_dict["d"])),
+ columns=x_cols + ["y", "d"],
+ )
+ dml_data = dml.DoubleMLData(df, "y", "d")
+ return dml_data
diff --git a/monte-cover/src/montecover/plm/plr_cate.py b/monte-cover/src/montecover/plm/plr_cate.py
new file mode 100644
index 0000000..71d47a3
--- /dev/null
+++ b/monte-cover/src/montecover/plm/plr_cate.py
@@ -0,0 +1,161 @@
+from typing import Any, Dict, Optional
+
+import doubleml as dml
+import numpy as np
+import pandas as pd
+import patsy
+from doubleml.datasets import make_heterogeneous_data
+from sklearn.linear_model import LinearRegression
+
+from montecover.base import BaseSimulation
+from montecover.utils import create_learner_from_config
+
+
+class PLRCATECoverageSimulation(BaseSimulation):
+ """Simulation class for coverage properties of DoubleMLPLR for CATE estimation."""
+
+ def __init__(
+ self,
+ config_file: str,
+ suppress_warnings: bool = True,
+ log_level: str = "INFO",
+ log_file: Optional[str] = None,
+ ):
+ super().__init__(
+ config_file=config_file,
+ suppress_warnings=suppress_warnings,
+ log_level=log_level,
+ log_file=log_file,
+ )
+
+ # Calculate oracle values
+ self._calculate_oracle_values()
+
+ def _process_config_parameters(self):
+ """Process simulation-specific parameters from config"""
+ # Process ML models in parameter grid
+ assert "learners" in self.dml_parameters, "No learners specified in the config file"
+
+ required_learners = ["ml_g", "ml_m"]
+ for learner in self.dml_parameters["learners"]:
+ for ml in required_learners:
+ assert ml in learner, f"No {ml} specified in the config file"
+
+ def _calculate_oracle_values(self):
+ """Calculate oracle values for the simulation."""
+ # Oracle values
+ data_oracle = make_heterogeneous_data(
+ n_obs=int(1e6),
+ p=self.dgp_parameters["p"][0],
+ support_size=self.dgp_parameters["support_size"][0],
+ n_x=self.dgp_parameters["n_x"][0],
+ binary_treatment=False,
+ )
+
+ self.logger.info("Calculating oracle values")
+
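+ # regress the true effects on a B-spline basis in X_0 and evaluate on a grid to obtain oracle CATEs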
+ design_matrix_oracle = patsy.dmatrix("bs(x, df=5, degree=2)", {"x": data_oracle["data"]["X_0"]})
+ spline_basis_oracle = pd.DataFrame(design_matrix_oracle)
+ oracle_model = LinearRegression()
+ oracle_model.fit(spline_basis_oracle, data_oracle["effects"])
+
+ # evaluate on grid
+ grid = {"x": np.linspace(0.1, 0.9, 100)}
+ spline_grid_oracle = pd.DataFrame(patsy.build_design_matrices([design_matrix_oracle.design_info], grid)[0])
+ oracle_cates = oracle_model.predict(spline_grid_oracle)
+
+ self.oracle_values = dict()
+ self.oracle_values["cates"] = oracle_cates
+ self.oracle_values["grid"] = grid
+
+ def run_single_rep(self, dml_data, dml_params) -> Dict[str, Any]:
+ """Run a single repetition with the given parameters."""
+ # Extract parameters
+ learner_config = dml_params["learners"]
+ learner_g_name, ml_g = create_learner_from_config(learner_config["ml_g"])
+ learner_m_name, ml_m = create_learner_from_config(learner_config["ml_m"])
+ score = dml_params["score"]
+
+ # Model
+ dml_model = dml.DoubleMLPLR(
+ obj_dml_data=dml_data,
+ ml_l=ml_g,
+ ml_m=ml_m,
+ ml_g=ml_g if score == "IV-type" else None,
+ score=score,
+ )
+ dml_model.fit()
+
+ # cate
+ design_matrix = patsy.dmatrix("bs(x, df=5, degree=2)", {"x": dml_data.data["X_0"]})
+ spline_basis = pd.DataFrame(design_matrix)
+ cate_model = dml_model.cate(basis=spline_basis)
+
+ # evaluation spline basis
+ spline_grid = pd.DataFrame(patsy.build_design_matrices([design_matrix.design_info], self.oracle_values["grid"])[0])
+
+ result = {
+ "coverage": [],
+ }
+ for level in self.confidence_parameters["level"]:
+ level_result = dict()
+ confint = cate_model.confint(basis=spline_grid, level=level)
+ effects = confint["effect"]
+ uniform_confint = cate_model.confint(basis=spline_grid, level=level, joint=True, n_rep_boot=2000)
+ level_result["coverage"] = self._compute_coverage(
+ thetas=effects,
+ oracle_thetas=self.oracle_values["cates"],
+ confint=confint.iloc[:, [0, 2]],
+ joint_confint=uniform_confint.iloc[:, [0, 2]],
+ )
+
+ # add parameters to the result
+ for res in level_result.values():
+ res.update(
+ {
+ "Learner g": learner_g_name,
+ "Learner m": learner_m_name,
+ "Score": score,
+ "level": level,
+ }
+ )
+ for key, res in level_result.items():
+ result[key].append(res)
+
+ return result
+
+ def summarize_results(self):
+ """Summarize the simulation results."""
+ self.logger.info("Summarizing simulation results")
+
+ # Group by parameter combinations
+ groupby_cols = ["Learner g", "Learner m", "Score", "level"]
+ aggregation_dict = {
+ "Coverage": "mean",
+ "CI Length": "mean",
+ "Bias": "mean",
+ "Uniform Coverage": "mean",
+ "Uniform CI Length": "mean",
+ "repetition": "count",
+ }
+
+ # Aggregate results (possibly multiple result dfs)
+ result_summary = dict()
+ for result_name, result_df in self.results.items():
+ result_summary[result_name] = result_df.groupby(groupby_cols).agg(aggregation_dict).reset_index()
+ self.logger.debug(f"Summarized {result_name} results")
+
+ return result_summary
+
+ def _generate_dml_data(self, dgp_params) -> dml.DoubleMLData:
+ """Generate data for the simulation."""
+ data = make_heterogeneous_data(
+ n_obs=dgp_params["n_obs"],
+ p=dgp_params["p"],
+ support_size=dgp_params["support_size"],
+ n_x=dgp_params["n_x"],
+ binary_treatment=False,
+ )
+ dml_data = dml.DoubleMLData(data["data"], "y", "d")
+ return dml_data
diff --git a/monte-cover/src/montecover/plm/plr_gate.py b/monte-cover/src/montecover/plm/plr_gate.py
new file mode 100644
index 0000000..b46ec67
--- /dev/null
+++ b/monte-cover/src/montecover/plm/plr_gate.py
@@ -0,0 +1,159 @@
+from typing import Any, Dict, Optional
+
+import doubleml as dml
+import numpy as np
+import pandas as pd
+from doubleml.datasets import make_heterogeneous_data
+
+from montecover.base import BaseSimulation
+from montecover.utils import create_learner_from_config
+
+
+class PLRGATECoverageSimulation(BaseSimulation):
+ """Simulation class for coverage properties of DoubleMLPLR for GATE estimation."""
+
+ def __init__(
+ self,
+ config_file: str,
+ suppress_warnings: bool = True,
+ log_level: str = "INFO",
+ log_file: Optional[str] = None,
+ ):
+ super().__init__(
+ config_file=config_file,
+ suppress_warnings=suppress_warnings,
+ log_level=log_level,
+ log_file=log_file,
+ )
+
+ # Calculate oracle values
+ self._calculate_oracle_values()
+
+ def _process_config_parameters(self):
+ """Process simulation-specific parameters from config"""
+ # Process ML models in parameter grid
+ assert "learners" in self.dml_parameters, "No learners specified in the config file"
+
+ required_learners = ["ml_g", "ml_m"]
+ for learner in self.dml_parameters["learners"]:
+ for ml in required_learners:
+ assert ml in learner, f"No {ml} specified in the config file"
+
+ def _generate_groups(self, data):
+ """Generate groups for the simulation."""
+ groups = pd.DataFrame(
+ np.column_stack(
+ (
+ data["X_0"] <= 0.3,
+ (data["X_0"] > 0.3) & (data["X_0"] <= 0.7),
+ data["X_0"] > 0.7,
+ )
+ ),
+ columns=["Group 1", "Group 2", "Group 3"],
+ )
+ return groups
+
+ def _calculate_oracle_values(self):
+ """Calculate oracle values for the simulation."""
+ # Oracle values
+ data_oracle = make_heterogeneous_data(
+ n_obs=int(1e6),
+ p=self.dgp_parameters["p"][0],
+ support_size=self.dgp_parameters["support_size"][0],
+ n_x=self.dgp_parameters["n_x"][0],
+ binary_treatment=False,
+ )
+
+ self.logger.info("Calculating oracle values")
+ groups = self._generate_groups(data_oracle["data"])
+ oracle_gates = [data_oracle["effects"][groups[group]].mean() for group in groups.columns]
+
+ self.oracle_values = dict()
+ self.oracle_values["gates"] = oracle_gates
+
+ def run_single_rep(self, dml_data, dml_params) -> Dict[str, Any]:
+ """Run a single repetition with the given parameters."""
+ # Extract parameters
+ learner_config = dml_params["learners"]
+ learner_g_name, ml_g = create_learner_from_config(learner_config["ml_g"])
+ learner_m_name, ml_m = create_learner_from_config(learner_config["ml_m"])
+ score = dml_params["score"]
+
+ # Model
+ dml_model = dml.DoubleMLPLR(
+ obj_dml_data=dml_data,
+ ml_l=ml_g,
+ ml_m=ml_m,
+ ml_g=ml_g if score == "IV-type" else None,
+ score=score,
+ )
+ dml_model.fit()
+
+ # gate
+ groups = self._generate_groups(dml_data.data)
+ gate_model = dml_model.gate(groups=groups)
+
+ result = {
+ "coverage": [],
+ }
+ for level in self.confidence_parameters["level"]:
+ level_result = dict()
+ confint = gate_model.confint(level=level)
+ effects = confint["effect"]
+ uniform_confint = gate_model.confint(level=level, joint=True, n_rep_boot=2000)
+ level_result["coverage"] = self._compute_coverage(
+ thetas=effects,
+ oracle_thetas=self.oracle_values["gates"],
+ confint=confint.iloc[:, [0, 2]],
+ joint_confint=uniform_confint.iloc[:, [0, 2]],
+ )
+
+ # add parameters to the result
+ for res in level_result.values():
+ res.update(
+ {
+ "Learner g": learner_g_name,
+ "Learner m": learner_m_name,
+ "Score": score,
+ "level": level,
+ }
+ )
+ for key, res in level_result.items():
+ result[key].append(res)
+
+ return result
+
+ def summarize_results(self):
+ """Summarize the simulation results."""
+ self.logger.info("Summarizing simulation results")
+
+ # Group by parameter combinations
+ groupby_cols = ["Learner g", "Learner m", "Score", "level"]
+ aggregation_dict = {
+ "Coverage": "mean",
+ "CI Length": "mean",
+ "Bias": "mean",
+ "Uniform Coverage": "mean",
+ "Uniform CI Length": "mean",
+ "repetition": "count",
+ }
+
+ # Aggregate results (possibly multiple result dfs)
+ result_summary = dict()
+ for result_name, result_df in self.results.items():
+ result_summary[result_name] = result_df.groupby(groupby_cols).agg(aggregation_dict).reset_index()
+ self.logger.debug(f"Summarized {result_name} results")
+
+ return result_summary
+
+ def _generate_dml_data(self, dgp_params) -> dml.DoubleMLData:
+ """Generate data for the simulation."""
+ data = make_heterogeneous_data(
+ n_obs=dgp_params["n_obs"],
+ p=dgp_params["p"],
+ support_size=dgp_params["support_size"],
+ n_x=dgp_params["n_x"],
+ binary_treatment=False,
+ )
+ dml_data = dml.DoubleMLData(data["data"], "y", "d")
+ return dml_data
diff --git a/monte-cover/src/montecover/rdd/__init__.py b/monte-cover/src/montecover/rdd/__init__.py
new file mode 100644
index 0000000..50efadb
--- /dev/null
+++ b/monte-cover/src/montecover/rdd/__init__.py
@@ -0,0 +1,7 @@
+"""Monte Carlo coverage simulations for RDD."""
+
+from montecover.rdd.rdd import RDDCoverageSimulation
+
+__all__ = [
+ "RDDCoverageSimulation",
+]
diff --git a/monte-cover/src/montecover/rdd/rdd.py b/monte-cover/src/montecover/rdd/rdd.py
new file mode 100644
index 0000000..8c36d80
--- /dev/null
+++ b/monte-cover/src/montecover/rdd/rdd.py
@@ -0,0 +1,240 @@
+import time
+import warnings
+from itertools import product
+from typing import Any, Dict, Optional
+
+import doubleml as dml
+import numpy as np
+import pandas as pd
+from doubleml.rdd.datasets import make_simple_rdd_data
+from rdrobust import rdrobust
+from statsmodels.nonparametric.kernel_regression import KernelReg
+
+from montecover.base import BaseSimulation
+from montecover.utils import create_learner_from_config
+
+
+class RDDCoverageSimulation(BaseSimulation):
+ """Simulation class for coverage properties of DoubleML RDFlex for RDD."""
+
+ def __init__(
+ self,
+ config_file: str,
+ suppress_warnings: bool = True,
+ log_level: str = "INFO",
+ log_file: Optional[str] = None,
+ ):
+ super().__init__(
+ config_file=config_file,
+ suppress_warnings=suppress_warnings,
+ log_level=log_level,
+ log_file=log_file,
+ )
+
+ self.fuzzy = self.dgp_parameters.get("fuzzy", [False])[0]
+ self.cutoff = self.dgp_parameters.get("cutoff", [0.0])[0]
+ # Calculate oracle values
+ self._calculate_oracle_values()
+
+ def _process_config_parameters(self):
+ """Process simulation-specific parameters from config."""
+
+ # Process ML models in parameter grid
+ assert "learners" in self.dml_parameters, "No learners specified in the config file"
+
+ required_learners = ["ml_g"]
+ for learner in self.dml_parameters["learners"]:
+ for ml in required_learners:
+ assert ml in learner, f"No {ml} specified in the config file"
+
+ def _calculate_oracle_values(self):
+ """Calculate oracle values for the simulation."""
+ self.logger.info("Calculating oracle values")
+
+ data_oracle = make_simple_rdd_data(n_obs=int(1e6), fuzzy=self.fuzzy, cutoff=self.cutoff)
+ # get oracle value
+ score = data_oracle["score"]
+ ite = data_oracle["oracle_values"]["Y1"] - data_oracle["oracle_values"]["Y0"]
+
+ # subset score and ite for faster computation
+ score_subset = (score >= (self.cutoff - 0.02)) & (score <= (self.cutoff + 0.02))
+ self.logger.info(f"Oracle score subset size: {np.sum(score_subset)}")
+ kernel_reg = KernelReg(endog=ite[score_subset], exog=score[score_subset], var_type="c", reg_type="ll")
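+ # local-linear regression of the ITE on the running variable, evaluated at the cutoff, gives the oracle effect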
+ effect_at_cutoff, _ = kernel_reg.fit(np.array([self.cutoff]))
+ oracle_effect = effect_at_cutoff[0]
+
+ self.logger.info(f"Oracle effect at cutoff: {oracle_effect}")
+ self.oracle_values = dict()
+ self.oracle_values["theta"] = oracle_effect
+
+ def _process_repetition(self, i_rep):
+ """Process a single repetition with all parameter combinations."""
+ if self.suppress_warnings:
+ warnings.simplefilter(action="ignore", category=UserWarning)
+
+ i_param_comb = 0
+ rep_results = {
+ "coverage": [],
+ }
+
+ # loop through all parameter combinations
+ for dgp_param_values in product(*self.dgp_parameters.values()):
+ dgp_params = dict(zip(self.dgp_parameters.keys(), dgp_param_values))
+ dml_data = self._generate_dml_data(dgp_params)
+
+ # --- Run rdrobust benchmark ---
+ self.logger.debug(f"Rep {i_rep+1}: Running rdrobust benchmark for DGP {dgp_params}")
+ param_start_time_rd_benchmark = time.time()
+
+ # Call the dedicated benchmark function
+ # Pass dml_data, current dgp_params, and repetition index
+ benchmark_result_list = self._rdrobust_benchmark(dml_data, dgp_params, i_rep)
+ if benchmark_result_list:
+ rep_results["coverage"].extend(benchmark_result_list)
+
+ param_duration_rd_benchmark = time.time() - param_start_time_rd_benchmark
+ self.logger.debug(f"rdrobust benchmark for DGP {dgp_params} completed in {param_duration_rd_benchmark:.2f}s")
+
+ for dml_param_values in product(*self.dml_parameters.values()):
+ dml_params = dict(zip(self.dml_parameters.keys(), dml_param_values))
+ i_param_comb += 1
+
+ comb_results = self._process_parameter_combination(i_rep, i_param_comb, dgp_params, dml_params, dml_data)
+ rep_results["coverage"].extend(comb_results["coverage"])
+
+ return rep_results
+
+ def _rdrobust_benchmark(self, dml_data, dgp_params, i_rep):
+ """Run a benchmark using rdrobust for RDD."""
+
+ # Extract parameters
+ score = dml_data.data[dml_data.s_col]
+ Y = dml_data.data[dml_data.y_col]
+ Z = dml_data.data[dml_data.x_cols]
+
+ benchmark_results_list = []
+ for level in self.confidence_parameters["level"]:
+ if self.fuzzy:
+ D = dml_data.data[dml_data.d_cols]
+ rd_model = rdrobust(y=Y, x=score, fuzzy=D, covs=Z, c=self.cutoff, level=level * 100)
+ else:
+ rd_model = rdrobust(y=Y, x=score, covs=Z, c=self.cutoff, level=level * 100)
+ coef_rd = rd_model.coef.loc["Robust", "Coeff"]
+ ci_lower_rd = rd_model.ci.loc["Robust", "CI Lower"]
+ ci_upper_rd = rd_model.ci.loc["Robust", "CI Upper"]
+
+ confint_for_compute = pd.DataFrame({"lower": [ci_lower_rd], "upper": [ci_upper_rd]})
+ theta_for_compute = np.array([coef_rd])
+
+ coverage_metrics = self._compute_coverage(
+ thetas=theta_for_compute,
+ oracle_thetas=self.oracle_values["theta"],
+ confint=confint_for_compute,
+ joint_confint=None,
+ )
+
+ # Add metadata
+ coverage_metrics.update(
+ {
+ "repetition": i_rep,
+ "Learner g": "Linear",
+ "Learner m": "Logistic",
+ "Method": "rdrobust",
+ "fs_specification": "cutoff",
+ "level": level,
+ }
+ )
+ benchmark_results_list.append(coverage_metrics)
+
+ return benchmark_results_list
+
+ def run_single_rep(self, dml_data, dml_params) -> Dict[str, Any]:
+ """Run a single repetition with the given parameters."""
+
+ # Extract parameters
+ learner_config = dml_params["learners"]
+ learner_g_name, ml_g = create_learner_from_config(learner_config["ml_g"])
+ if self.fuzzy:
+ learner_m_name, ml_m = create_learner_from_config(learner_config["ml_m"])
+ else:
+ learner_m_name, ml_m = "N/A", None
+ fs_specification = dml_params["fs_specification"]
+
+ # Model
+ dml_model = dml.rdd.RDFlex(
+ obj_dml_data=dml_data,
+ ml_g=ml_g,
+ ml_m=ml_m,
+ n_folds=5,
+ n_rep=1,
+ fuzzy=self.fuzzy,
+ cutoff=self.cutoff,
+ fs_specification=fs_specification,
+ )
+ dml_model.fit()
+
+ result = {
+ "coverage": [],
+ }
+ for level in self.confidence_parameters["level"]:
+ level_result = dict()
+ level_result["coverage"] = self._compute_coverage(
+ thetas=dml_model.coef,
+ oracle_thetas=self.oracle_values["theta"],
+ confint=dml_model.confint(level=level),
+ joint_confint=None,
+ )
+
+ # add parameters to the result
+ for res in level_result.values():
+ res.update(
+ {
+ "Learner g": learner_g_name,
+ "Learner m": learner_m_name,
+ "Method": "RDFlex",
+ "fs_specification": fs_specification,
+ "level": level,
+ }
+ )
+ for key, res in level_result.items():
+ result[key].append(res)
+
+ return result
+
+ def summarize_results(self):
+ """Summarize the simulation results."""
+ self.logger.info("Summarizing simulation results")
+
+ # Group by parameter combinations
+ groupby_cols = ["Method", "fs_specification", "Learner g", "Learner m", "level"]
+ aggregation_dict = {
+ "Coverage": "mean",
+ "CI Length": "mean",
+ "Bias": "mean",
+ "repetition": "count",
+ }
+
+ # Aggregate results (possibly multiple result dfs)
+ result_summary = dict()
+ for result_name, result_df in self.results.items():
+ result_summary[result_name] = result_df.groupby(groupby_cols).agg(aggregation_dict).reset_index()
+ self.logger.debug(f"Summarized {result_name} results")
+
+ return result_summary
+
+ def _generate_dml_data(self, dgp_params) -> dml.DoubleMLData:
+ """Generate data for the simulation."""
+ data = make_simple_rdd_data(
+ n_obs=dgp_params["n_obs"],
+ fuzzy=dgp_params["fuzzy"],
+ cutoff=dgp_params["cutoff"],
+ )
+
+ score = data["score"]
+ Y = data["Y"]
+ X = data["X"].reshape(dgp_params["n_obs"], -1)
+ D = data["D"]
+
+ dml_data = dml.DoubleMLData.from_arrays(y=Y, d=D, x=X, s=score)
+ return dml_data
diff --git a/monte-cover/src/montecover/ssm/__init__.py b/monte-cover/src/montecover/ssm/__init__.py
new file mode 100644
index 0000000..86d02b5
--- /dev/null
+++ b/monte-cover/src/montecover/ssm/__init__.py
@@ -0,0 +1,9 @@
+"""Monte Carlo coverage simulations for SSM."""
+
+from montecover.ssm.ssm_mar_ate import SSMMarATECoverageSimulation
+from montecover.ssm.ssm_nonig_ate import SSMNonIgnorableATECoverageSimulation
+
+__all__ = [
+ "SSMMarATECoverageSimulation",
+ "SSMNonIgnorableATECoverageSimulation",
+]
diff --git a/monte-cover/src/montecover/ssm/ssm_mar_ate.py b/monte-cover/src/montecover/ssm/ssm_mar_ate.py
new file mode 100644
index 0000000..ef86363
--- /dev/null
+++ b/monte-cover/src/montecover/ssm/ssm_mar_ate.py
@@ -0,0 +1,124 @@
+from typing import Any, Dict, Optional
+
+import doubleml as dml
+from doubleml.datasets import make_ssm_data
+
+from montecover.base import BaseSimulation
+from montecover.utils import create_learner_from_config
+
+
+class SSMMarATECoverageSimulation(BaseSimulation):
+ """Simulation class for coverage properties of DoubleMLSSM with missing at random for ATE estimation."""
+
+ def __init__(
+ self,
+ config_file: str,
+ suppress_warnings: bool = True,
+ log_level: str = "INFO",
+ log_file: Optional[str] = None,
+ ):
+ super().__init__(
+ config_file=config_file,
+ suppress_warnings=suppress_warnings,
+ log_level=log_level,
+ log_file=log_file,
+ )
+
+ # Calculate oracle values
+ self._calculate_oracle_values()
+
+ def _process_config_parameters(self):
+ """Process simulation-specific parameters from config"""
+ # Process ML models in parameter grid
+ assert "learners" in self.dml_parameters, "No learners specified in the config file"
+
+ required_learners = ["ml_g", "ml_m", "ml_pi"]
+ for learner in self.dml_parameters["learners"]:
+ for ml in required_learners:
+ assert ml in learner, f"No {ml} specified in the config file"
+
+ def _calculate_oracle_values(self):
+ """Calculate oracle values for the simulation."""
+ self.logger.info("Calculating oracle values")
+
+ self.oracle_values = dict()
+ self.oracle_values["theta"] = self.dgp_parameters["theta"]
+
+ def run_single_rep(self, dml_data: dml.DoubleMLData, dml_params: Dict[str, Any]) -> Dict[str, Any]:
+ """Run a single repetition with the given parameters."""
+ # Extract parameters
+ learner_config = dml_params["learners"]
+ learner_g_name, ml_g = create_learner_from_config(learner_config["ml_g"])
+ learner_m_name, ml_m = create_learner_from_config(learner_config["ml_m"])
+ learner_pi_name, ml_pi = create_learner_from_config(learner_config["ml_pi"])
+
+ # Model
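+ # ml_pi estimates the selection propensity; the missing-at-random score needs no selection instrument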
+ dml_model = dml.DoubleMLSSM(
+ obj_dml_data=dml_data,
+ ml_g=ml_g,
+ ml_m=ml_m,
+ ml_pi=ml_pi,
+ score="missing-at-random",
+ )
+ dml_model.fit()
+
+ result = {
+ "coverage": [],
+ }
+ for level in self.confidence_parameters["level"]:
+ level_result = dict()
+ level_result["coverage"] = self._compute_coverage(
+ thetas=dml_model.coef,
+ oracle_thetas=self.oracle_values["theta"],
+ confint=dml_model.confint(level=level),
+ joint_confint=None,
+ )
+
+ # add parameters to the result
+ for res_metric in level_result.values():
+ res_metric.update(
+ {
+ "Learner g": learner_g_name,
+ "Learner m": learner_m_name,
+ "Learner pi": learner_pi_name,
+ "level": level,
+ }
+ )
+ for key, res in level_result.items():
+ result[key].append(res)
+
+ return result
+
+ def summarize_results(self):
+ """Summarize the simulation results."""
+ self.logger.info("Summarizing simulation results")
+
+ # Group by parameter combinations
+ groupby_cols = ["Learner g", "Learner m", "Learner pi", "level"]
+ aggregation_dict = {
+ "Coverage": "mean",
+ "CI Length": "mean",
+ "Bias": "mean",
+ "repetition": "count",
+ }
+
+ # Aggregate results (possibly multiple result dfs)
+ result_summary = dict()
+ for result_name, result_df in self.results.items():
+ result_summary[result_name] = result_df.groupby(groupby_cols).agg(aggregation_dict).reset_index()
+ self.logger.debug(f"Summarized {result_name} results")
+
+ return result_summary
+
+ def _generate_dml_data(self, dgp_params: Dict[str, Any]) -> dml.DoubleMLData:
+ """Generate data for the simulation."""
+ data = make_ssm_data(
+ theta=dgp_params["theta"],
+ n_obs=dgp_params["n_obs"],
+ dim_x=dgp_params["dim_x"],
+ mar=True,
+ return_type="DataFrame",
+ )
+ dml_data = dml.DoubleMLData(data, "y", "d", s_col="s")
+ return dml_data
diff --git a/monte-cover/src/montecover/ssm/ssm_nonig_ate.py b/monte-cover/src/montecover/ssm/ssm_nonig_ate.py
new file mode 100644
index 0000000..8c82f29
--- /dev/null
+++ b/monte-cover/src/montecover/ssm/ssm_nonig_ate.py
@@ -0,0 +1,126 @@
+from typing import Any, Dict, Optional
+
+import doubleml as dml
+from doubleml.datasets import make_ssm_data
+
+from montecover.base import BaseSimulation
+from montecover.utils import create_learner_from_config
+
+
+class SSMNonIgnorableATECoverageSimulation(BaseSimulation):
+ """
+ Simulation class for coverage properties of DoubleMLSSM with nonignorable nonresponse for ATE estimation.
+ """
+
+ def __init__(
+ self,
+ config_file: str,
+ suppress_warnings: bool = True,
+ log_level: str = "INFO",
+ log_file: Optional[str] = None,
+ ):
+ super().__init__(
+ config_file=config_file,
+ suppress_warnings=suppress_warnings,
+ log_level=log_level,
+ log_file=log_file,
+ )
+
+ # Calculate oracle values
+ self._calculate_oracle_values()
+
+ def _process_config_parameters(self):
+ """Process simulation-specific parameters from config"""
+ # Process ML models in parameter grid
+ assert "learners" in self.dml_parameters, "No learners specified in the config file"
+
+ required_learners = ["ml_g", "ml_m", "ml_pi"]
+ for learner in self.dml_parameters["learners"]:
+ for ml in required_learners:
+ assert ml in learner, f"No {ml} specified in the config file"
+
+ def _calculate_oracle_values(self):
+ """Calculate oracle values for the simulation."""
+ self.logger.info("Calculating oracle values")
+
+ self.oracle_values = dict()
+ self.oracle_values["theta"] = self.dgp_parameters["theta"]
+
+ def run_single_rep(self, dml_data: dml.DoubleMLData, dml_params: Dict[str, Any]) -> Dict[str, Any]:
+ """Run a single repetition with the given parameters."""
+ # Extract parameters
+ learner_config = dml_params["learners"]
+ learner_g_name, ml_g = create_learner_from_config(learner_config["ml_g"])
+ learner_m_name, ml_m = create_learner_from_config(learner_config["ml_m"])
+ learner_pi_name, ml_pi = create_learner_from_config(learner_config["ml_pi"])
+
+ # Model
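+ # the nonignorable score uses the selection instrument z declared via z_cols in _generate_dml_data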
+ dml_model = dml.DoubleMLSSM(
+ obj_dml_data=dml_data,
+ ml_g=ml_g,
+ ml_m=ml_m,
+ ml_pi=ml_pi,
+ score="nonignorable",
+ )
+ dml_model.fit()
+
+ result = {
+ "coverage": [],
+ }
+ for level in self.confidence_parameters["level"]:
+ level_result = dict()
+ level_result["coverage"] = self._compute_coverage(
+ thetas=dml_model.coef,
+ oracle_thetas=self.oracle_values["theta"],
+ confint=dml_model.confint(level=level),
+ joint_confint=None,
+ )
+
+ # add parameters to the result
+ for res_metric in level_result.values():
+ res_metric.update(
+ {
+ "Learner g": learner_g_name,
+ "Learner m": learner_m_name,
+ "Learner pi": learner_pi_name,
+ "level": level,
+ }
+ )
+ for key, res in level_result.items():
+ result[key].append(res)
+
+ return result
+
+ def summarize_results(self):
+ """Summarize the simulation results."""
+ self.logger.info("Summarizing simulation results")
+
+ # Group by parameter combinations
+ groupby_cols = ["Learner g", "Learner m", "Learner pi", "level"]
+ aggregation_dict = {
+ "Coverage": "mean",
+ "CI Length": "mean",
+ "Bias": "mean",
+ "repetition": "count",
+ }
+
+ # Aggregate results (possibly multiple result dfs)
+ result_summary = dict()
+ for result_name, result_df in self.results.items():
+ result_summary[result_name] = result_df.groupby(groupby_cols).agg(aggregation_dict).reset_index()
+ self.logger.debug(f"Summarized {result_name} results")
+
+ return result_summary
+
+ def _generate_dml_data(self, dgp_params: Dict[str, Any]) -> dml.DoubleMLData:
+ """Generate data for the simulation."""
+ data = make_ssm_data(
+ theta=dgp_params["theta"],
+ n_obs=dgp_params["n_obs"],
+ dim_x=dgp_params["dim_x"],
+ mar=False,
+ return_type="DataFrame",
+ )
+ dml_data = dml.DoubleMLData(data, "y", "d", z_cols="z", s_col="s")
+ return dml_data
diff --git a/monte-cover/src/montecover/utils.py b/monte-cover/src/montecover/utils.py
new file mode 100644
index 0000000..838cb43
--- /dev/null
+++ b/monte-cover/src/montecover/utils.py
@@ -0,0 +1,80 @@
+from typing import Any, Callable, Dict, Tuple
+
+from doubleml.utils import GlobalClassifier, GlobalRegressor
+from lightgbm import LGBMClassifier, LGBMRegressor
+from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, StackingClassifier, StackingRegressor
+from sklearn.linear_model import LassoCV, LinearRegression, LogisticRegression, Ridge
+
+LearnerInstantiator = Callable[[Dict[str, Any]], Any]
+# Map learner abbreviations to their instantiation logic
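+# LGBM defaults (verbose=-1, n_jobs=1) are merged first so that user-supplied params can override them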
+LEARNER_REGISTRY: Dict[str, LearnerInstantiator] = {
+ "LassoCV": lambda params: LassoCV(**params),
+ "RF Regr.": lambda params: RandomForestRegressor(**params),
+ "RF Clas.": lambda params: RandomForestClassifier(**params),
+ "LGBM Regr.": lambda params: LGBMRegressor(**{**{"verbose": -1, "n_jobs": 1}, **params}),
+ "LGBM Clas.": lambda params: LGBMClassifier(**{**{"verbose": -1, "n_jobs": 1}, **params}),
+ "Linear": lambda params: LinearRegression(**params),
+ "Logistic": lambda params: LogisticRegression(**params),
+ "Global Linear": lambda params: GlobalRegressor(LinearRegression(**params)),
+ "Global Logistic": lambda params: GlobalClassifier(LogisticRegression(**params)),
+ "Stacked Regr.": lambda params: StackingRegressor(
+ estimators=[
+ ("lr", LinearRegression()),
+ (
+ "lgbm",
+ LGBMRegressor(**{**{"verbose": -1, "n_jobs": 1}, **params}),
+ ),
+ ("glr", GlobalRegressor(LinearRegression())),
+ ],
+ final_estimator=Ridge(),
+ ),
+ "Stacked Clas.": lambda params: StackingClassifier(
+ estimators=[
+ ("lr", LogisticRegression()),
+ (
+ "lgbm",
+ LGBMClassifier(**{**{"verbose": -1, "n_jobs": 1}, **params}),
+ ),
+ ("glr", GlobalClassifier(LogisticRegression())),
+ ],
+ final_estimator=LogisticRegression(),
+ ),
+}
+
+
+def create_learner_from_config(learner_config: Dict[str, Any]) -> Tuple[str, Any]:
+ """
+ Instantiates a machine learning model based on a configuration dictionary.
+ The 'name' in learner_config should use the defined abbreviations.
+
+ Args:
+ learner_config: A dictionary containing 'name' (str) for the learner
+ (e.g., "LassoCV", "RF Regr.", "RF Clas.", "LGBM Regr.",
+ "LGBM Clas.", "Linear", "Logostic")
+ and optionally 'params' (dict) for its hyperparameters.
+
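+ Example (illustrative; any name from LEARNER_REGISTRY works):
+ >>> name, model = create_learner_from_config({"name": "LGBM Regr.", "params": {"n_estimators": 100}})
+ >>> name
+ 'LGBM Regr.'
+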
+ Returns:
+ A tuple containing the learner's abbreviated name (str) and the instantiated learner object.
+
+ Raises:
+ ValueError: If the learner name in the config is unknown.
+ """
+ learner_name_abbr = learner_config["name"]
+ params = learner_config.get("params", {})
+
+ if learner_name_abbr not in LEARNER_REGISTRY:
+ raise ValueError(
+ f"Unknown learner name abbreviation in config: {learner_name_abbr}. "
+ f"Available learners are: {', '.join(LEARNER_REGISTRY.keys())}"
+ )
+
+ instantiator = LEARNER_REGISTRY[learner_name_abbr]
+ learner = instantiator(params)
+
+ return (learner_name_abbr, learner)
diff --git a/monte-cover/uv.lock b/monte-cover/uv.lock
index 57e1dd3..dd4c066 100644
--- a/monte-cover/uv.lock
+++ b/monte-cover/uv.lock
@@ -77,6 +77,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/7c/fc/6a8cb64e5f0324877d503c854da15d76c1e50eb722e320b15345c4d0c6de/cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a", size = 182009 },
]
+[[package]]
+name = "cfgv"
+version = "3.4.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/11/74/539e56497d9bd1d484fd863dd69cbbfa653cd2aa27abfe35653494d85e94/cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560", size = 7114 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/c5/55/51844dd50c4fc7a33b653bfaba4c2456f06955289ca770a5dbd5fd267374/cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9", size = 7249 },
+]
+
[[package]]
name = "click"
version = "8.1.8"
@@ -186,9 +195,18 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/4e/8c/f3147f5c4b73e7550fe5f9352eaa956ae838d5c51eb58e7a25b9f3e2643b/decorator-5.2.1-py3-none-any.whl", hash = "sha256:d316bb415a2d9e2d2b3abcc4084c6502fc09240e292cd76a76afc106a1c8e04a", size = 9190 },
]
+[[package]]
+name = "distlib"
+version = "0.3.9"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/0d/dd/1bec4c5ddb504ca60fc29472f3d27e8d4da1257a854e1d96742f15c1d02d/distlib-0.3.9.tar.gz", hash = "sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403", size = 613923 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/91/a1/cf2472db20f7ce4a6be1253a81cfdf85ad9c7885ffbed7047fb72c24cf87/distlib-0.3.9-py2.py3-none-any.whl", hash = "sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87", size = 468973 },
+]
+
[[package]]
name = "doubleml"
-version = "0.9.3"
+version = "0.10.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "joblib" },
@@ -198,11 +216,12 @@ dependencies = [
{ name = "plotly" },
{ name = "scikit-learn" },
{ name = "scipy" },
+ { name = "seaborn" },
{ name = "statsmodels" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/48/c4/5798ab5c520868d31c625df3600e942612dea707a8da613a1a4341d47f1f/doubleml-0.9.3.tar.gz", hash = "sha256:a1f6337a5700856a3ab77af0b44449741d0fcb188b03ce7d15c0c0d0db0aca29", size = 226094 }
+sdist = { url = "https://files.pythonhosted.org/packages/24/e0/145b63e9b682a139911f5a3dbc6c34aa77a460b1747a2d418599eeeb2974/doubleml-0.10.0.tar.gz", hash = "sha256:648a4440f4e9c3586f78d338430e3b914f147f16cc13da864cef1439aad8e7a1", size = 294772 }
wheels = [
- { url = "https://files.pythonhosted.org/packages/97/89/59665f3e7f1a2d99d6fd0babf61b2560c96686fe1fc17f8201f0a0c0baa0/DoubleML-0.9.3-py3-none-any.whl", hash = "sha256:c2ef19d8355babaf03392ae705353f309a684f4a8191cf8e2a7fed74db419808", size = 342917 },
+ { url = "https://files.pythonhosted.org/packages/5a/04/41d0f9cc48ca4b1d4c357961010d19f8a67fa5c2a139ddd5766655dd6ab5/doubleml-0.10.0-py3-none-any.whl", hash = "sha256:6bac311bc937bfed82d2e0d2dea0ac911604469fd144aa2de392daaca14134ed", size = 443289 },
]
[package.optional-dependencies]
@@ -219,6 +238,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/7b/8f/c4d9bafc34ad7ad5d8dc16dd1347ee0e507a52c3adb6bfa8887e1c6a26ba/executing-2.2.0-py2.py3-none-any.whl", hash = "sha256:11387150cad388d62750327a53d3339fad4888b39a6fe233c3afbb54ecffd3aa", size = 26702 },
]
+[[package]]
+name = "filelock"
+version = "3.18.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/0a/10/c23352565a6544bdc5353e0b15fc1c563352101f30e24bf500207a54df9a/filelock-3.18.0.tar.gz", hash = "sha256:adbc88eabb99d2fec8c9c1b229b171f18afa655400173ddc653d5d01501fb9f2", size = 18075 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/4d/36/2a115987e2d8c300a974597416d9de88f2444426de9571f4b59b2cca3acc/filelock-3.18.0-py3-none-any.whl", hash = "sha256:c401f4f8377c4464e6db25fff06205fd89bdd83b65eb0488ed1b160f780e21de", size = 16215 },
+]
+
[[package]]
name = "fonttools"
version = "4.56.0"
@@ -244,6 +272,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/bf/ff/44934a031ce5a39125415eb405b9efb76fe7f9586b75291d66ae5cbfc4e6/fonttools-4.56.0-py3-none-any.whl", hash = "sha256:1088182f68c303b50ca4dc0c82d42083d176cba37af1937e1a976a31149d4d14", size = 1089800 },
]
+[[package]]
+name = "identify"
+version = "2.6.12"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/a2/88/d193a27416618628a5eea64e3223acd800b40749a96ffb322a9b55a49ed1/identify-2.6.12.tar.gz", hash = "sha256:d8de45749f1efb108badef65ee8386f0f7bb19a7f26185f74de6367bffbaf0e6", size = 99254 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/7a/cd/18f8da995b658420625f7ef13f037be53ae04ec5ad33f9b718240dcfd48c/identify-2.6.12-py2.py3-none-any.whl", hash = "sha256:ad9672d5a72e0d2ff7c5c8809b62dfa60458626352fb0eb7b55e69bdc45334a2", size = 99145 },
+]
+
[[package]]
name = "ipykernel"
version = "6.29.5"
@@ -511,6 +548,7 @@ dependencies = [
{ name = "lightgbm" },
{ name = "numpy" },
{ name = "pandas" },
+ { name = "pre-commit" },
{ name = "pyyaml" },
{ name = "ruff" },
{ name = "scikit-learn" },
@@ -519,13 +557,14 @@ dependencies = [
[package.metadata]
requires-dist = [
{ name = "black", specifier = ">=25.1.0" },
- { name = "doubleml", extras = ["rdd"], specifier = ">=0.9.3" },
+ { name = "doubleml", extras = ["rdd"], specifier = ">=0.10.0" },
{ name = "ipykernel", specifier = ">=6.29.5" },
{ name = "itables", specifier = ">=2.2.5" },
{ name = "joblib", specifier = ">=1.4.2" },
{ name = "lightgbm", specifier = ">=4.6.0" },
{ name = "numpy", specifier = ">=2.2.4" },
{ name = "pandas", specifier = ">=2.2.3" },
+ { name = "pre-commit", specifier = ">=4.2.0" },
{ name = "pyyaml", specifier = ">=6.0.2" },
{ name = "ruff", specifier = ">=0.11.0" },
{ name = "scikit-learn", specifier = ">=1.5.2" },
@@ -558,6 +597,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/a0/c4/c2971a3ba4c6103a3d10c4b0f24f461ddc027f0f09763220cf35ca1401b3/nest_asyncio-1.6.0-py3-none-any.whl", hash = "sha256:87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c", size = 5195 },
]
+[[package]]
+name = "nodeenv"
+version = "1.9.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/43/16/fc88b08840de0e0a72a2f9d8c6bae36be573e475a6326ae854bcc549fc45/nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f", size = 47437 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/d2/1d/1b658dbd2b9fa9c4c9f32accbfc0205d532c8c6194dc0f2a4c0428e7128a/nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9", size = 22314 },
+]
+
[[package]]
name = "numpy"
version = "2.2.4"
@@ -758,6 +806,22 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/4d/c5/7cfda7ba9fa02243367fbfb4880b6de8039266f22c47c2dbbd39b6adc46f/plotnine-0.14.5-py3-none-any.whl", hash = "sha256:4a8bc4360732dd69a0263def4abab285ed8f0f4386186f1e44c642f2cea79b88", size = 1301197 },
]
+[[package]]
+name = "pre-commit"
+version = "4.2.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "cfgv" },
+ { name = "identify" },
+ { name = "nodeenv" },
+ { name = "pyyaml" },
+ { name = "virtualenv" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/08/39/679ca9b26c7bb2999ff122d50faa301e49af82ca9c066ec061cfbc0c6784/pre_commit-4.2.0.tar.gz", hash = "sha256:601283b9757afd87d40c4c4a9b2b5de9637a8ea02eaff7adc2d0fb4e04841146", size = 193424 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/88/74/a88bf1b1efeae488a0c0b7bdf71429c313722d1fc0f377537fbe554e6180/pre_commit-4.2.0-py2.py3-none-any.whl", hash = "sha256:a009ca7205f1eb497d10b845e52c838a98b6cdd2102a6c8e4540e94ee75c58bd", size = 220707 },
+]
+
[[package]]
name = "prompt-toolkit"
version = "3.0.50"
@@ -1035,6 +1099,20 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/0a/c8/b3f566db71461cabd4b2d5b39bcc24a7e1c119535c8361f81426be39bb47/scipy-1.15.2-cp313-cp313t-win_amd64.whl", hash = "sha256:fe8a9eb875d430d81755472c5ba75e84acc980e4a8f6204d402849234d3017db", size = 40477705 },
]
+[[package]]
+name = "seaborn"
+version = "0.13.2"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "matplotlib" },
+ { name = "numpy" },
+ { name = "pandas" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/86/59/a451d7420a77ab0b98f7affa3a1d78a313d2f7281a57afb1a34bae8ab412/seaborn-0.13.2.tar.gz", hash = "sha256:93e60a40988f4d65e9f4885df477e2fdaff6b73a9ded434c1ab356dd57eefff7", size = 1457696 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/83/11/00d3c3dfc25ad54e731d91449895a79e4bf2384dc3ac01809010ba88f6d5/seaborn-0.13.2-py3-none-any.whl", hash = "sha256:636f8336facf092165e27924f223d3c62ca560b1f2bb5dff7ab7fad265361987", size = 294914 },
+]
+
[[package]]
name = "six"
version = "1.17.0"
@@ -1130,6 +1208,20 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/0f/dd/84f10e23edd882c6f968c21c2434fe67bd4a528967067515feca9e611e5e/tzdata-2025.1-py2.py3-none-any.whl", hash = "sha256:7e127113816800496f027041c570f50bcd464a020098a3b6b199517772303639", size = 346762 },
]
+[[package]]
+name = "virtualenv"
+version = "20.31.2"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "distlib" },
+ { name = "filelock" },
+ { name = "platformdirs" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/56/2c/444f465fb2c65f40c3a104fd0c495184c4f2336d65baf398e3c75d72ea94/virtualenv-20.31.2.tar.gz", hash = "sha256:e10c0a9d02835e592521be48b332b6caee6887f332c111aa79a09b9e79efc2af", size = 6076316 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/f3/40/b1c265d4b2b62b58576588510fc4d1fe60a86319c8de99fd8e9fec617d2c/virtualenv-20.31.2-py3-none-any.whl", hash = "sha256:36efd0d9650ee985f0cad72065001e66d49a6f24eb44d98980f630686243cf11", size = 6057982 },
+]
+
[[package]]
name = "wcwidth"
version = "0.2.13"
diff --git a/requirements.txt b/requirements.txt
index 2accaab..1f7cde5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -5,4 +5,5 @@ pandas
scikit-learn
lightgbm
itables
-ipykernel
\ No newline at end of file
+ipykernel
+pre-commit>=4.2.0
diff --git a/results/did/did_cs_atte_coverage_metadata.csv b/results/did/did_cs_atte_coverage_metadata.csv
index 19aa007..08604a2 100644
--- a/results/did/did_cs_atte_coverage_metadata.csv
+++ b/results/did/did_cs_atte_coverage_metadata.csv
@@ -1,2 +1,2 @@
DoubleML Version,Script,Date,Total Runtime (seconds),Python Version
-0.10.dev0,did_cs_atte_coverage.py,2025-05-22 15:05:03,12767.107241630554,3.12.3
+0.11.dev0,did_cs_atte_coverage.py,2025-06-06 09:10:00,12688.770802021027,3.12.3
diff --git a/results/did/did_cs_multi_config.yml b/results/did/did_cs_multi_config.yml
new file mode 100644
index 0000000..d469e0b
--- /dev/null
+++ b/results/did/did_cs_multi_config.yml
@@ -0,0 +1,63 @@
+simulation_parameters:
+ repetitions: 500
+ max_runtime: 19800
+ random_seed: 42
+ n_jobs: -2
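+  # joblib-style n_jobs: -2 uses all available cores but one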
+dgp_parameters:
+ DGP:
+ - 1
+ - 4
+ - 6
+ n_obs:
+ - 2000
+ lambda_t:
+ - 0.5
+learner_definitions:
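+  # the &id00x anchors defined here are reused below in dml_parameters.learners via *id00x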
+ linear: &id001
+ name: Linear
+ logistic: &id002
+ name: Logistic
+ lgbmr: &id003
+ name: LGBM Regr.
+ params:
+ n_estimators: 300
+ learning_rate: 0.03
+ num_leaves: 7
+ max_depth: 3
+ min_child_samples: 20
+ subsample: 0.8
+ colsample_bytree: 0.8
+ reg_alpha: 0.1
+ reg_lambda: 1.0
+ random_state: 42
+ lgbmc: &id004
+ name: LGBM Clas.
+ params:
+ n_estimators: 300
+ learning_rate: 0.03
+ num_leaves: 7
+ max_depth: 3
+ min_child_samples: 20
+ subsample: 0.8
+ colsample_bytree: 0.8
+ reg_alpha: 0.1
+ reg_lambda: 1.0
+ random_state: 42
+dml_parameters:
+ learners:
+ - ml_g: *id001
+ ml_m: *id002
+ - ml_g: *id003
+ ml_m: *id004
+ score:
+ - observational
+ - experimental
+ in_sample_normalization:
+ - true
+ - false
+confidence_parameters:
+ level:
+ - 0.95
+ - 0.9
diff --git a/results/did/did_cs_multi_detailed.csv b/results/did/did_cs_multi_detailed.csv
new file mode 100644
index 0000000..795dc0d
--- /dev/null
+++ b/results/did/did_cs_multi_detailed.csv
@@ -0,0 +1,49 @@
+Learner g,Learner m,Score,In-sample-norm.,DGP,level,Coverage,CI Length,Bias,Uniform Coverage,Uniform CI Length,repetition
+LGBM Regr.,LGBM Clas.,experimental,False,1,0.9,0.7445,1.770325608867807,0.5949971587062639,0.398,2.7648307369564926,500
+LGBM Regr.,LGBM Clas.,experimental,False,1,0.95,0.824,2.1094730725193043,0.5949971587062639,0.566,3.034916611141197,500
+LGBM Regr.,LGBM Clas.,experimental,False,4,0.9,0.773,1.766478883435425,0.5692557054828591,0.472,2.773314379428287,500
+LGBM Regr.,LGBM Clas.,experimental,False,4,0.95,0.8455,2.1048894164526817,0.5692557054828591,0.624,3.0389648164538805,500
+LGBM Regr.,LGBM Clas.,experimental,False,6,0.9,0.9618333333333333,1.7719758223340252,0.34491539054381054,0.978,2.780855944642299,500
+LGBM Regr.,LGBM Clas.,experimental,False,6,0.95,0.9855,2.111439423146251,0.34491539054381054,0.99,3.0466788998860315,500
+LGBM Regr.,LGBM Clas.,experimental,True,1,0.9,0.7488333333333334,1.7704687014862623,0.5945326473164728,0.412,2.762232396348127,500
+LGBM Regr.,LGBM Clas.,experimental,True,1,0.95,0.8228333333333334,2.1096435778907434,0.5945326473164728,0.548,3.029512649083705,500
+LGBM Regr.,LGBM Clas.,experimental,True,4,0.9,0.7753333333333333,1.7663884619807455,0.5717727216970863,0.494,2.774302186004533,500
+LGBM Regr.,LGBM Clas.,experimental,True,4,0.95,0.8461666666666666,2.1047816726439335,0.5717727216970863,0.628,3.040160202574957,500
+LGBM Regr.,LGBM Clas.,experimental,True,6,0.9,0.9606666666666667,1.7721766188302754,0.34137474439314713,0.986,2.777950990796601,500
+LGBM Regr.,LGBM Clas.,experimental,True,6,0.95,0.9866666666666666,2.111678686929012,0.34137474439314713,0.996,3.044521002012899,500
+LGBM Regr.,LGBM Clas.,observational,False,1,0.9,0.9501666666666666,1.990648327452718,0.4052261056167495,0.976,3.1272474543428164,500
+LGBM Regr.,LGBM Clas.,observational,False,1,0.95,0.981,2.372003784265803,0.4052261056167495,0.992,3.425556561218648,500
+LGBM Regr.,LGBM Clas.,observational,False,4,0.9,0.9326666666666666,2.6295041141393964,0.5680990688936988,0.942,4.109931474457153,500
+LGBM Regr.,LGBM Clas.,observational,False,4,0.95,0.9701666666666666,3.1332474066187324,0.5680990688936988,0.982,4.507999042258337,500
+LGBM Regr.,LGBM Clas.,observational,False,6,0.9,0.9541666666666666,2.021214888425688,0.4123935257729841,0.974,3.17183347081639,500
+LGBM Regr.,LGBM Clas.,observational,False,6,0.95,0.9835,2.408426088145391,0.4123935257729841,0.99,3.4747764416681246,500
+LGBM Regr.,LGBM Clas.,observational,True,1,0.9,0.9496666666666667,1.9511518959677854,0.4056426955645088,0.98,3.067238579715148,500
+LGBM Regr.,LGBM Clas.,observational,True,1,0.95,0.9818333333333333,2.324940883373038,0.4056426955645088,0.994,3.3568971294243126,500
+LGBM Regr.,LGBM Clas.,observational,True,4,0.9,0.9341666666666666,2.585977302930749,0.5562229964093515,0.954,4.045565581860224,500
+LGBM Regr.,LGBM Clas.,observational,True,4,0.95,0.9745,3.0813820120736044,0.5562229964093515,0.97,4.434188862890503,500
+LGBM Regr.,LGBM Clas.,observational,True,6,0.9,0.9543333333333334,1.9905001066838395,0.40929563062222535,0.988,3.125148883705166,500
+LGBM Regr.,LGBM Clas.,observational,True,6,0.95,0.9835,2.371827168326243,0.40929563062222535,0.994,3.420794663008727,500
+Linear,Logistic,experimental,False,1,0.9,0.9168333333333334,0.41673836812489545,0.09490120305554799,0.92,0.6499646304710539,500
+Linear,Logistic,experimental,False,1,0.95,0.9591666666666666,0.4965743936830481,0.09490120305554799,0.958,0.7135508775059938,500
+Linear,Logistic,experimental,False,4,0.9,0.7525,2.805808333249603,0.9424974705539232,0.342,4.392358230173571,500
+Linear,Logistic,experimental,False,4,0.95,0.8215,3.343326841114613,0.9424974705539232,0.49,4.81356703917665,500
+Linear,Logistic,experimental,False,6,0.9,0.9681666666666666,2.927962952235543,0.5448624818472574,0.992,4.5809829929878605,500
+Linear,Logistic,experimental,False,6,0.95,0.989,3.488883047353698,0.5448624818472574,0.996,5.023039124726024,500
+Linear,Logistic,experimental,True,1,0.9,0.9198333333333334,0.4167745201915361,0.09489757179213576,0.92,0.6493074421900091,500
+Linear,Logistic,experimental,True,1,0.95,0.959,0.49661747152743585,0.09489757179213576,0.952,0.7129847857545065,500
+Linear,Logistic,experimental,True,4,0.9,0.7515,2.806389863604486,0.9438172172038332,0.336,4.394024798505712,500
+Linear,Logistic,experimental,True,4,0.95,0.8215,3.344019777271856,0.9438172172038332,0.496,4.818369602897961,500
+Linear,Logistic,experimental,True,6,0.9,0.9675,2.927774549870841,0.545976377808569,0.99,4.5788635431480404,500
+Linear,Logistic,experimental,True,6,0.95,0.9896666666666666,3.4886585520895808,0.545976377808569,0.998,5.023223313601676,500
+Linear,Logistic,observational,False,1,0.9,0.9445,0.4474147057259759,0.09208439317406426,0.968,0.6963338800760291,500
+Linear,Logistic,observational,False,1,0.95,0.978,0.5331275044830301,0.09208439317406426,0.986,0.7642780321317574,500
+Linear,Logistic,observational,False,4,0.9,0.8395,3.393272580905659,0.9671859927018581,0.682,5.280031123475279,500
+Linear,Logistic,observational,False,4,0.95,0.9018333333333334,4.043333667706703,0.9671859927018581,0.804,5.803033620961812,500
+Linear,Logistic,observational,False,6,0.9,0.9661666666666666,2.9968869470930812,0.5607913346640213,0.992,4.688634179932907,500
+Linear,Logistic,observational,False,6,0.95,0.9886666666666666,3.571011052774927,0.5607913346640213,1.0,5.1435718965101325,500
+Linear,Logistic,observational,True,1,0.9,0.9446666666666667,0.44607501860443266,0.09181865803868633,0.972,0.694040589591067,500
+Linear,Logistic,observational,True,1,0.95,0.9766666666666667,0.5315311688178055,0.09181865803868633,0.982,0.7624234124861663,500
+Linear,Logistic,observational,True,4,0.9,0.8425,3.3996101092641116,0.9626068949383512,0.708,5.289944291369528,500
+Linear,Logistic,observational,True,4,0.95,0.9095,4.050885298520558,0.9626068949383512,0.81,5.812309801411936,500
+Linear,Logistic,observational,True,6,0.9,0.9653333333333334,2.9934246048394693,0.5632386309587609,0.99,4.67988904592181,500
+Linear,Logistic,observational,True,6,0.95,0.9878333333333333,3.5668854175159357,0.5632386309587609,1.0,5.131467888102042,500
diff --git a/results/did/did_cs_multi_eventstudy.csv b/results/did/did_cs_multi_eventstudy.csv
new file mode 100644
index 0000000..1b97a50
--- /dev/null
+++ b/results/did/did_cs_multi_eventstudy.csv
@@ -0,0 +1,49 @@
+Learner g,Learner m,Score,In-sample-norm.,DGP,level,Coverage,CI Length,Bias,Uniform Coverage,Uniform CI Length,repetition
+LGBM Regr.,LGBM Clas.,experimental,False,1,0.9,0.623,1.2344028525035762,0.5867917352577009,0.35,1.7693962242178276,500
+LGBM Regr.,LGBM Clas.,experimental,False,1,0.95,0.7186666666666667,1.4708817208279752,0.5867917352577009,0.502,1.9609823371949875,500
+LGBM Regr.,LGBM Clas.,experimental,False,4,0.9,0.6716666666666666,1.2457079936515478,0.5587834594489091,0.456,1.787988168603755,500
+LGBM Regr.,LGBM Clas.,experimental,False,4,0.95,0.7693333333333334,1.4843526273737648,0.5587834594489091,0.596,1.9848441118866604,500
+LGBM Regr.,LGBM Clas.,experimental,False,6,0.9,0.9553333333333334,1.2548116303893624,0.25441640771793017,0.968,1.799914731144992,500
+LGBM Regr.,LGBM Clas.,experimental,False,6,0.95,0.9806666666666666,1.4952002796159412,0.25441640771793017,0.988,1.9964128112202024,500
+LGBM Regr.,LGBM Clas.,experimental,True,1,0.9,0.6173333333333334,1.2342398029059287,0.5878297827043432,0.328,1.7676223235760382,500
+LGBM Regr.,LGBM Clas.,experimental,True,1,0.95,0.7153333333333334,1.4706874352490966,0.5878297827043432,0.482,1.962154166560331,500
+LGBM Regr.,LGBM Clas.,experimental,True,4,0.9,0.665,1.2456282746415375,0.5567609889479388,0.482,1.7890029538815984,500
+LGBM Regr.,LGBM Clas.,experimental,True,4,0.95,0.768,1.4842576363144127,0.5567609889479388,0.61,1.9841482801792536,500
+LGBM Regr.,LGBM Clas.,experimental,True,6,0.9,0.9473333333333334,1.2549629324084535,0.2529649046165915,0.978,1.8004004709049248,500
+LGBM Regr.,LGBM Clas.,experimental,True,6,0.95,0.9836666666666666,1.4953805670915852,0.2529649046165915,0.992,1.9964711805317528,500
+LGBM Regr.,LGBM Clas.,observational,False,1,0.9,0.95,1.438086091604987,0.2928300824932225,0.974,2.0655092074851575,500
+LGBM Regr.,LGBM Clas.,observational,False,1,0.95,0.9806666666666666,1.7135852698562943,0.2928300824932225,0.988,2.29250119775212,500
+LGBM Regr.,LGBM Clas.,observational,False,4,0.9,0.9133333333333333,1.8958082010478872,0.43665208441716385,0.934,2.7174153287309672,500
+LGBM Regr.,LGBM Clas.,observational,False,4,0.95,0.9593333333333334,2.258994803407606,0.43665208441716385,0.974,3.018313563700997,500
+LGBM Regr.,LGBM Clas.,observational,False,6,0.9,0.9463333333333334,1.429508949123356,0.296161119189052,0.954,2.054951833185491,500
+LGBM Regr.,LGBM Clas.,observational,False,6,0.95,0.9753333333333334,1.7033649742148993,0.296161119189052,0.986,2.280416092396764,500
+LGBM Regr.,LGBM Clas.,observational,True,1,0.9,0.95,1.401299278120269,0.2939067762871093,0.976,2.013012735107051,500
+LGBM Regr.,LGBM Clas.,observational,True,1,0.95,0.985,1.6697510779533535,0.2939067762871093,0.99,2.235222163171953,500
+LGBM Regr.,LGBM Clas.,observational,True,4,0.9,0.919,1.862906524183341,0.42400027998341316,0.93,2.6738713551739077,500
+LGBM Regr.,LGBM Clas.,observational,True,4,0.95,0.9666666666666667,2.2197900373245583,0.42400027998341316,0.96,2.9647553494845016,500
+LGBM Regr.,LGBM Clas.,observational,True,6,0.9,0.9476666666666667,1.4093623247425004,0.2961286220265989,0.968,2.0231777330879597,500
+LGBM Regr.,LGBM Clas.,observational,True,6,0.95,0.9766666666666667,1.679358790594952,0.2961286220265989,0.99,2.2460491085047685,500
+Linear,Logistic,experimental,False,1,0.9,0.8936666666666666,0.29668129511983965,0.07431255501976836,0.9,0.4234475244785486,500
+Linear,Logistic,experimental,False,1,0.95,0.9456666666666667,0.353517567638704,0.07431255501976836,0.944,0.4706234364546205,500
+Linear,Logistic,experimental,False,4,0.9,0.6223333333333334,1.9676971347315257,0.9611549277615734,0.358,2.821851876593927,500
+Linear,Logistic,experimental,False,4,0.95,0.7223333333333334,2.3446557513474295,0.9611549277615734,0.514,3.1296803439927747,500
+Linear,Logistic,experimental,False,6,0.9,0.9593333333333334,2.0617828042693422,0.4014842496440879,0.976,2.9502179783272156,500
+Linear,Logistic,experimental,False,6,0.95,0.9836666666666666,2.4567657413999964,0.4014842496440879,0.994,3.281559353958586,500
+Linear,Logistic,experimental,True,1,0.9,0.8956666666666666,0.29669741633654045,0.07427451790842504,0.904,0.42300310272980823,500
+Linear,Logistic,experimental,True,1,0.95,0.9466666666666667,0.3535367772532271,0.07427451790842504,0.944,0.4703234973074518,500
+Linear,Logistic,experimental,True,4,0.9,0.6253333333333334,1.968237754015678,0.9612257304593996,0.378,2.8228298488222006,500
+Linear,Logistic,experimental,True,4,0.95,0.7253333333333334,2.34529993895715,0.9612257304593996,0.504,3.131512414726046,500
+Linear,Logistic,experimental,True,6,0.9,0.9626666666666667,2.061499649901614,0.40221977991502417,0.978,2.956723898358197,500
+Linear,Logistic,experimental,True,6,0.95,0.986,2.456428342160502,0.40221977991502417,0.994,3.27621932440078,500
+Linear,Logistic,observational,False,1,0.9,0.9463333333333334,0.3176061939182343,0.06584511870516535,0.976,0.4526647021991571,500
+Linear,Logistic,observational,False,1,0.95,0.9766666666666667,0.37845112242619566,0.06584511870516535,0.992,0.503374033380795,500
+Linear,Logistic,observational,False,4,0.9,0.7153333333333334,2.4038988361616647,0.9742433088378242,0.612,3.4426054640030586,500
+Linear,Logistic,observational,False,4,0.95,0.818,2.8644221371155596,0.9742433088378242,0.742,3.818906934969005,500
+Linear,Logistic,observational,False,6,0.9,0.9583333333333334,2.1093483697182047,0.41187232611430147,0.984,3.023378654122078,500
+Linear,Logistic,observational,False,6,0.95,0.9846666666666666,2.5134436084493803,0.41187232611430147,0.994,3.352228561843766,500
+Linear,Logistic,observational,True,1,0.9,0.9443333333333334,0.3168146024540254,0.06585930633635377,0.974,0.4518084310690513,500
+Linear,Logistic,observational,True,1,0.95,0.9743333333333334,0.37750788301881183,0.06585930633635377,0.994,0.5023115027058873,500
+Linear,Logistic,observational,True,4,0.9,0.7176666666666667,2.41160294711594,0.9692477115042165,0.61,3.4556749332846293,500
+Linear,Logistic,observational,True,4,0.95,0.824,2.873602151528918,0.9692477115042165,0.77,3.831797951807144,500
+Linear,Logistic,observational,True,6,0.9,0.9586666666666667,2.1086877347027313,0.41243280545073824,0.984,3.0221755013295026,500
+Linear,Logistic,observational,True,6,0.95,0.9853333333333334,2.5126564132752702,0.41243280545073824,0.998,3.35521140900093,500
diff --git a/results/did/did_cs_multi_group.csv b/results/did/did_cs_multi_group.csv
new file mode 100644
index 0000000..795a81c
--- /dev/null
+++ b/results/did/did_cs_multi_group.csv
@@ -0,0 +1,49 @@
+Learner g,Learner m,Score,In-sample-norm.,DGP,level,Coverage,CI Length,Bias,Uniform Coverage,Uniform CI Length,repetition
+LGBM Regr.,LGBM Clas.,experimental,False,1,0.9,0.648,1.958130612753029,0.6931432900546463,0.336,2.5122538474084704,500
+LGBM Regr.,LGBM Clas.,experimental,False,1,0.95,0.7266666666666667,2.333256537321367,0.6931432900546463,0.46,2.837637387247171,500
+LGBM Regr.,LGBM Clas.,experimental,False,4,0.9,0.684,1.9405327407159012,0.656846624632093,0.378,2.4916795872647755,500
+LGBM Regr.,LGBM Clas.,experimental,False,4,0.95,0.7486666666666666,2.312287379438766,0.656846624632093,0.494,2.8099028821210648,500
+LGBM Regr.,LGBM Clas.,experimental,False,6,0.9,0.958,1.9418353648783815,0.37543970576234414,0.968,2.4937112856330335,500
+LGBM Regr.,LGBM Clas.,experimental,False,6,0.95,0.9826666666666666,2.313839551864336,0.37543970576234414,0.994,2.817681872392808,500
+LGBM Regr.,LGBM Clas.,experimental,True,1,0.9,0.65,1.9571814998792045,0.6889770027800006,0.336,2.509453326822512,500
+LGBM Regr.,LGBM Clas.,experimental,True,1,0.95,0.7273333333333333,2.3321255995774375,0.6889770027800006,0.426,2.83457592815282,500
+LGBM Regr.,LGBM Clas.,experimental,True,4,0.9,0.6873333333333332,1.9400950244352047,0.6604767279846773,0.382,2.4928028949564713,500
+LGBM Regr.,LGBM Clas.,experimental,True,4,0.95,0.75,2.311765808320488,0.6604767279846773,0.502,2.8123234932785457,500
+LGBM Regr.,LGBM Clas.,experimental,True,6,0.9,0.96,1.9424489320197122,0.3773552808347474,0.972,2.493917731063607,500
+LGBM Regr.,LGBM Clas.,experimental,True,6,0.95,0.984,2.3145706622071662,0.3773552808347474,0.996,2.8163239892073255,500
+LGBM Regr.,LGBM Clas.,observational,False,1,0.9,0.9426666666666667,2.1455761943315403,0.4381141091824277,0.95,2.7514817504646536,500
+LGBM Regr.,LGBM Clas.,observational,False,1,0.95,0.976,2.5566117240293487,0.4381141091824277,0.984,3.1105932950425177,500
+LGBM Regr.,LGBM Clas.,observational,False,4,0.9,0.92,2.770826181354666,0.6218240106432057,0.92,3.557591638284541,500
+LGBM Regr.,LGBM Clas.,observational,False,4,0.95,0.9606666666666667,3.3016430361289624,0.6218240106432057,0.962,4.018077865773314,500
+LGBM Regr.,LGBM Clas.,observational,False,6,0.9,0.9513333333333334,2.206974710708492,0.44767634643371434,0.974,2.8329429832963875,500
+LGBM Regr.,LGBM Clas.,observational,False,6,0.95,0.982,2.6297725687581597,0.44767634643371434,0.988,3.2016185335069935,500
+LGBM Regr.,LGBM Clas.,observational,True,1,0.9,0.936,2.1203778078700832,0.44909450499809317,0.952,2.720027329156187,500
+LGBM Regr.,LGBM Clas.,observational,True,1,0.95,0.9746666666666667,2.526585994612616,0.44909450499809317,0.978,3.0751075279585267,500
+LGBM Regr.,LGBM Clas.,observational,True,4,0.9,0.9226666666666666,2.7276343103261955,0.601075346222161,0.93,3.5001905789415355,500
+LGBM Regr.,LGBM Clas.,observational,True,4,0.95,0.966,3.2501767474248435,0.601075346222161,0.96,3.948744210132073,500
+LGBM Regr.,LGBM Clas.,observational,True,6,0.9,0.9493333333333334,2.173939421934124,0.4498220130180585,0.966,2.7918554238706332,500
+LGBM Regr.,LGBM Clas.,observational,True,6,0.95,0.9826666666666666,2.590408594264792,0.4498220130180585,0.992,3.151112383580006,500
+Linear,Logistic,experimental,False,1,0.9,0.9013333333333333,0.37265738853214053,0.08859319616682844,0.904,0.47839941611486514,500
+Linear,Logistic,experimental,False,1,0.95,0.9526666666666667,0.44404866677981547,0.08859319616682844,0.956,0.5406676902427345,500
+Linear,Logistic,experimental,False,4,0.9,0.6673333333333332,3.1336232691639196,1.0895923128095015,0.316,4.02233178605052,500
+Linear,Logistic,experimental,False,4,0.95,0.728,3.7339424299175934,1.0895923128095015,0.394,4.5438248369278265,500
+Linear,Logistic,experimental,False,6,0.9,0.958,3.2599730175255166,0.5961529828509201,0.97,4.184613732542172,500
+Linear,Logistic,experimental,False,6,0.95,0.982,3.8844974411275577,0.5961529828509201,0.99,4.720414550175649,500
+Linear,Logistic,experimental,True,1,0.9,0.9046666666666666,0.3726964218885218,0.08870041349802113,0.9,0.4783660681766364,500
+Linear,Logistic,experimental,True,1,0.95,0.9526666666666667,0.44409517789268865,0.08870041349802113,0.954,0.5410617011776719,500
+Linear,Logistic,experimental,True,4,0.9,0.67,3.134323095972308,1.0902383455706834,0.32,4.0225478373755905,500
+Linear,Logistic,experimental,True,4,0.95,0.7333333333333333,3.734776324993351,1.0902383455706834,0.39,4.5485148620710625,500
+Linear,Logistic,experimental,True,6,0.9,0.9546666666666667,3.259435631958217,0.5970778315125913,0.968,4.184156255097815,500
+Linear,Logistic,experimental,True,6,0.95,0.9813333333333334,3.883857106729127,0.5970778315125913,0.994,4.7239829009315395,500
+Linear,Logistic,observational,False,1,0.9,0.9446666666666667,0.39837191256296084,0.08144967759883086,0.964,0.5123795593054733,500
+Linear,Logistic,observational,False,1,0.95,0.9806666666666666,0.47468941204382226,0.08144967759883086,0.982,0.5784243257010652,500
+Linear,Logistic,observational,False,4,0.9,0.754,3.6857060219439988,1.1215026888858457,0.56,4.722848754761831,500
+Linear,Logistic,observational,False,4,0.95,0.8213333333333334,4.391789605012528,1.1215026888858457,0.692,5.344408749712175,500
+Linear,Logistic,observational,False,6,0.9,0.9606666666666667,3.336166902774549,0.6136791390581188,0.972,4.281255977087202,500
+Linear,Logistic,observational,False,6,0.95,0.9833333333333334,3.9752880552486802,0.6136791390581188,0.988,4.8345238298141755,500
+Linear,Logistic,observational,True,1,0.9,0.946,0.39727633051259076,0.08107017958546997,0.96,0.5105019305400308,500
+Linear,Logistic,observational,True,1,0.95,0.9773333333333334,0.47338394551132973,0.08107017958546997,0.982,0.5760144793290006,500
+Linear,Logistic,observational,True,4,0.9,0.746,3.6837636034756183,1.1142685809512556,0.588,4.723009055885042,500
+Linear,Logistic,observational,True,4,0.95,0.83,4.389475070649987,1.1142685809512556,0.704,5.332966188093874,500
+Linear,Logistic,observational,True,6,0.9,0.9573333333333334,3.3344393970984694,0.6206253517833774,0.97,4.276874358539161,500
+Linear,Logistic,observational,True,6,0.95,0.9826666666666666,3.9732296052731164,0.6206253517833774,0.992,4.832204250841039,500
diff --git a/results/did/did_cs_multi_metadata.csv b/results/did/did_cs_multi_metadata.csv
new file mode 100644
index 0000000..3434afe
--- /dev/null
+++ b/results/did/did_cs_multi_metadata.csv
@@ -0,0 +1,2 @@
+DoubleML Version,Script,Date,Total Runtime (minutes),Python Version,Config File
+0.11.dev0,DIDCSMultiCoverageSimulation,2025-06-13 08:24,86.96345181862513,3.12.9,scripts/did/did_cs_multi_config.yml
diff --git a/results/did/did_cs_multi_time.csv b/results/did/did_cs_multi_time.csv
new file mode 100644
index 0000000..393f4eb
--- /dev/null
+++ b/results/did/did_cs_multi_time.csv
@@ -0,0 +1,49 @@
+Learner g,Learner m,Score,In-sample-norm.,DGP,level,Coverage,CI Length,Bias,Uniform Coverage,Uniform CI Length,repetition
+LGBM Regr.,LGBM Clas.,experimental,False,1,0.9,0.6846666666666666,1.6852637187978814,0.6568561745562633,0.644,2.166373850978708,500
+LGBM Regr.,LGBM Clas.,experimental,False,1,0.95,0.8013333333333333,2.0081155789027143,0.6568561745562633,0.794,2.4432403323739083,500
+LGBM Regr.,LGBM Clas.,experimental,False,4,0.9,0.6946666666666667,1.6572625832981909,0.6444035333046219,0.63,2.1287742018575164,500
+LGBM Regr.,LGBM Clas.,experimental,False,4,0.95,0.8106666666666666,1.97475016801972,0.6444035333046219,0.776,2.4050156056560694,500
+LGBM Regr.,LGBM Clas.,experimental,False,6,0.9,0.9606666666666667,1.678145359731322,0.32086557637982427,0.98,2.1561372793012143,500
+LGBM Regr.,LGBM Clas.,experimental,False,6,0.95,0.984,1.9996335309132296,0.32086557637982427,0.992,2.4339888129396257,500
+LGBM Regr.,LGBM Clas.,experimental,True,1,0.9,0.694,1.6853618715300172,0.6617200953838079,0.63,2.164988518441371,500
+LGBM Regr.,LGBM Clas.,experimental,True,1,0.95,0.8013333333333333,2.0082325350967603,0.6617200953838079,0.772,2.4431988690684,500
+LGBM Regr.,LGBM Clas.,experimental,True,4,0.9,0.6793333333333333,1.6570297419818631,0.639660290746131,0.65,2.130950656548393,500
+LGBM Regr.,LGBM Clas.,experimental,True,4,0.95,0.806,1.9744727204787125,0.639660290746131,0.782,2.4054991633398193,500
+LGBM Regr.,LGBM Clas.,experimental,True,6,0.9,0.9626666666666667,1.677818276995257,0.31433596219616716,0.988,2.155945941033667,500
+LGBM Regr.,LGBM Clas.,experimental,True,6,0.95,0.9906666666666666,1.9992437877943599,0.31433596219616716,1.0,2.4347112974605247,500
+LGBM Regr.,LGBM Clas.,observational,False,1,0.9,0.952,1.9285517129609813,0.3837221142569943,0.956,2.4751286634818546,500
+LGBM Regr.,LGBM Clas.,observational,False,1,0.95,0.98,2.298011104326713,0.3837221142569943,0.984,2.7955267219007296,500
+LGBM Regr.,LGBM Clas.,observational,False,4,0.9,0.9206666666666666,2.774724635621781,0.6047701373727594,0.914,3.562841935465964,500
+LGBM Regr.,LGBM Clas.,observational,False,4,0.95,0.958,3.3062883309039646,0.6047701373727594,0.954,4.026554617243503,500
+LGBM Regr.,LGBM Clas.,observational,False,6,0.9,0.964,1.9254223403023532,0.37567005141848064,0.97,2.470542968789135,500
+LGBM Regr.,LGBM Clas.,observational,False,6,0.95,0.9873333333333334,2.2942822268116463,0.37567005141848064,0.992,2.7963395465549707,500
+LGBM Regr.,LGBM Clas.,observational,True,1,0.9,0.9586666666666667,1.8968224696755345,0.3916482505280383,0.976,2.4349507643906807,500
+LGBM Regr.,LGBM Clas.,observational,True,1,0.95,0.9846666666666666,2.2602033790208185,0.3916482505280383,0.992,2.753431400136418,500
+LGBM Regr.,LGBM Clas.,observational,True,4,0.9,0.932,2.7393908833255214,0.5858862962139311,0.94,3.5199757412546693,500
+LGBM Regr.,LGBM Clas.,observational,True,4,0.95,0.9673333333333334,3.2641855681993706,0.5858862962139311,0.966,3.9741224058073636,500
+LGBM Regr.,LGBM Clas.,observational,True,6,0.9,0.9526666666666667,1.8895154847816398,0.3782825026634886,0.976,2.427868448446374,500
+LGBM Regr.,LGBM Clas.,observational,True,6,0.95,0.984,2.251496569495065,0.3782825026634886,1.0,2.741675687739425,500
+Linear,Logistic,experimental,False,1,0.9,0.8873333333333334,0.3449267364872548,0.08642860984541795,0.898,0.4421340023541829,500
+Linear,Logistic,experimental,False,1,0.95,0.946,0.411005556812322,0.08642860984541795,0.954,0.4995936732494318,500
+Linear,Logistic,experimental,False,4,0.9,0.6606666666666666,2.657325208820743,1.0944897316787332,0.578,3.4170145894791286,500
+Linear,Logistic,experimental,False,4,0.95,0.768,3.1663982856346244,1.0944897316787332,0.718,3.8571460284501917,500
+Linear,Logistic,experimental,False,6,0.9,0.9666666666666667,2.8130006694021406,0.5241010134911126,0.99,3.615745083978043,500
+Linear,Logistic,experimental,False,6,0.95,0.9886666666666666,3.351897038238965,0.5241010134911126,0.996,4.080907435373497,500
+Linear,Logistic,experimental,True,1,0.9,0.8866666666666666,0.34494936823919464,0.08627620913303025,0.892,0.44244261069510993,500
+Linear,Logistic,experimental,True,1,0.95,0.944,0.4110325242080722,0.08627620913303025,0.954,0.4997820751033549,500
+Linear,Logistic,experimental,True,4,0.9,0.662,2.658224241071875,1.0948743949781543,0.564,3.414305214743393,500
+Linear,Logistic,experimental,True,4,0.95,0.7673333333333334,3.167469548635955,1.0948743949781543,0.704,3.8565120655762,500
+Linear,Logistic,experimental,True,6,0.9,0.9653333333333334,2.8124774925935068,0.52557492082057,0.984,3.6161253198306906,500
+Linear,Logistic,experimental,True,6,0.95,0.9913333333333334,3.351273634620754,0.52557492082057,0.994,4.081566943051543,500
+Linear,Logistic,observational,False,1,0.9,0.948,0.3848384213393204,0.08045475266904066,0.972,0.49329329293532265,500
+Linear,Logistic,observational,False,1,0.95,0.9826666666666666,0.4585632626109482,0.08045475266904066,0.984,0.5578459151419344,500
+Linear,Logistic,observational,False,4,0.9,0.7826666666666666,3.561955924053432,1.1426462614848412,0.77,4.570526668350063,500
+Linear,Logistic,observational,False,4,0.95,0.8806666666666666,4.244332268399335,1.1426462614848412,0.858,5.1742268814347625,500
+Linear,Logistic,observational,False,6,0.9,0.9693333333333334,2.8849996002426015,0.542775483007719,0.988,3.7090559830760395,500
+Linear,Logistic,observational,False,6,0.95,0.9893333333333334,3.4376890558753503,0.542775483007719,0.998,4.185426374123738,500
+Linear,Logistic,observational,True,1,0.9,0.9486666666666667,0.38328722165443907,0.07985513409020634,0.976,0.49100462864725225,500
+Linear,Logistic,observational,True,1,0.95,0.9813333333333334,0.4567148941814533,0.07985513409020634,0.984,0.5552786616821493,500
+Linear,Logistic,observational,True,4,0.9,0.7926666666666666,3.574512574056902,1.132495088626377,0.788,4.589451201868081,500
+Linear,Logistic,observational,True,4,0.95,0.8913333333333334,4.25929443972572,1.132495088626377,0.872,5.1765558927503275,500
+Linear,Logistic,observational,True,6,0.9,0.9686666666666667,2.881358739293633,0.5417165951212739,0.984,3.6998331255899966,500
+Linear,Logistic,observational,True,6,0.95,0.9866666666666666,3.4333507024706638,0.5417165951212739,0.996,4.181041617405785,500
diff --git a/results/did/did_multi_detailed.csv b/results/did/did_multi_detailed.csv
deleted file mode 100644
index 3384909..0000000
--- a/results/did/did_multi_detailed.csv
+++ /dev/null
@@ -1,49 +0,0 @@
-Learner g,Learner m,Score,In-sample-norm.,DGP,level,Coverage,CI Length,Bias,Uniform Coverage,Uniform CI Length,repetition
-LGBM,LGBM,experimental,False,1,0.9,0.4084166666666667,0.6711379447655711,0.45573140858217215,0.079,1.0050739084247675,1000
-LGBM,LGBM,experimental,False,1,0.95,0.49625,0.7997101862715227,0.45573140858217215,0.132,1.115181838446232,1000
-LGBM,LGBM,experimental,False,4,0.9,0.5408333333333334,0.5829183572257842,0.3256247127712507,0.227,0.8979506620328224,1000
-LGBM,LGBM,experimental,False,4,0.95,0.62975,0.6945900640455575,0.3256247127712507,0.299,0.9884673580918313,1000
-LGBM,LGBM,experimental,False,6,0.9,0.8979166666666666,0.5800302078090503,0.14243775358828042,0.89,0.892704703388473,1000
-LGBM,LGBM,experimental,False,6,0.95,0.95,0.691148621751838,0.14243775358828042,0.955,0.9829906161123636,1000
-LGBM,LGBM,experimental,True,1,0.9,0.4105833333333333,0.671233414997924,0.45509646764918477,0.081,1.005300983475438,1000
-LGBM,LGBM,experimental,True,1,0.95,0.4990833333333333,0.7998239460699273,0.45509646764918477,0.139,1.1154305467306445,1000
-LGBM,LGBM,experimental,True,4,0.9,0.5370833333333334,0.5828484393127565,0.3258110613161524,0.212,0.8973624125797166,1000
-LGBM,LGBM,experimental,True,4,0.95,0.6318333333333334,0.6945067517135889,0.3258110613161524,0.305,0.9879336259779586,1000
-LGBM,LGBM,experimental,True,6,0.9,0.89625,0.5799071420563836,0.14138962778278252,0.903,0.8926727198727438,1000
-LGBM,LGBM,experimental,True,6,0.95,0.947,0.6910019798628547,0.14138962778278252,0.954,0.982776272537425,1000
-LGBM,LGBM,observational,False,1,0.9,0.90725,2.7320711716537267,0.7081787206178205,0.946,4.2481549947682335,1000
-LGBM,LGBM,observational,False,1,0.95,0.9646666666666667,3.255463593782398,0.7081787206178205,0.985,4.664072261741128,1000
-LGBM,LGBM,observational,False,4,0.9,0.9076666666666666,3.5140296061122283,0.9722309202462336,0.972,5.406999616722633,1000
-LGBM,LGBM,observational,False,4,0.95,0.9648333333333333,4.187224538241929,0.9722309202462336,0.995,5.954805878504198,1000
-LGBM,LGBM,observational,False,6,0.9,0.92525,2.166594877755592,0.5140262158838963,0.96,3.3799046023620645,1000
-LGBM,LGBM,observational,False,6,0.95,0.9675833333333334,2.5816570300909847,0.5140262158838963,0.984,3.7082268657228123,1000
-LGBM,LGBM,observational,True,1,0.9,0.9088333333333334,1.1285770515711326,0.2779542066229801,0.934,1.7607428145840895,1000
-LGBM,LGBM,observational,True,1,0.95,0.9595833333333333,1.3447825013812504,0.2779542066229801,0.971,1.9310262593666496,1000
-LGBM,LGBM,observational,True,4,0.9,0.92175,1.4119321412730104,0.3268873239533469,0.941,2.1833548301330525,1000
-LGBM,LGBM,observational,True,4,0.95,0.9650833333333334,1.6824209158589551,0.3268873239533469,0.975,2.400470983600388,1000
-LGBM,LGBM,observational,True,6,0.9,0.9054166666666666,1.0205289656802177,0.2486678941122539,0.917,1.597156856017389,1000
-LGBM,LGBM,observational,True,6,0.95,0.9546666666666667,1.2160352660803362,0.2486678941122539,0.957,1.7513439251743073,1000
-Linear,Linear,experimental,False,1,0.9,0.84575,0.2947177158765365,0.08161611344312397,0.752,0.4590941708451621,1000
-Linear,Linear,experimental,False,1,0.95,0.9100833333333334,0.35117781865763664,0.08161611344312397,0.853,0.504099184053661,1000
-Linear,Linear,experimental,False,4,0.9,0.3073333333333333,0.974825708310707,0.808795845992618,0.033,1.4112500397019019,1000
-Linear,Linear,experimental,False,4,0.95,0.38408333333333333,1.1615764759772769,0.808795845992618,0.069,1.5731674507528597,1000
-Linear,Linear,experimental,False,6,0.9,0.8911666666666667,0.9832819402273247,0.243739180309483,0.893,1.4205928620859418,1000
-Linear,Linear,experimental,False,6,0.95,0.9423333333333334,1.171652697794173,0.243739180309483,0.949,1.5853446204128676,1000
-Linear,Linear,experimental,True,1,0.9,0.8463333333333334,0.2947197524013989,0.08159475357883039,0.759,0.4593380570357237,1000
-Linear,Linear,experimental,True,1,0.95,0.9099166666666666,0.351180245326684,0.08159475357883039,0.86,0.5045635273614648,1000
-Linear,Linear,experimental,True,4,0.9,0.30625,0.9748445034208943,0.8085550484761943,0.034,1.4110989059663714,1000
-Linear,Linear,experimental,True,4,0.95,0.38475,1.1615988717324064,0.8085550484761943,0.068,1.574069732455078,1000
-Linear,Linear,experimental,True,6,0.9,0.89,0.9832818903405501,0.24368928454372696,0.893,1.4193554640817743,1000
-Linear,Linear,experimental,True,6,0.95,0.94225,1.1716526383504147,0.24368928454372696,0.952,1.5837169582518265,1000
-Linear,Linear,observational,False,1,0.9,0.9005,0.3188204993145348,0.07710334427560887,0.894,0.495905282385802,1000
-Linear,Linear,observational,False,1,0.95,0.9494166666666666,0.37989805655090114,0.07710334427560887,0.948,0.5446250222430568,1000
-Linear,Linear,observational,False,4,0.9,0.42083333333333334,1.2366389453819784,0.7873737934686624,0.183,1.7673158966945293,1000
-Linear,Linear,observational,False,4,0.95,0.527,1.4735461898335716,0.7873737934686624,0.272,1.977244843861637,1000
-Linear,Linear,observational,False,6,0.9,0.8901666666666667,1.0315660555384851,0.2592484851241425,0.889,1.4876338955955155,1000
-Linear,Linear,observational,False,6,0.95,0.9431666666666666,1.2291867698140935,0.2592484851241425,0.937,1.6596741796186278,1000
-Linear,Linear,observational,True,1,0.9,0.8986666666666666,0.31665078693066356,0.07694111946321994,0.883,0.49251977204037817,1000
-Linear,Linear,observational,True,1,0.95,0.9495,0.37731268478315316,0.07694111946321994,0.944,0.5411752370665596,1000
-Linear,Linear,observational,True,4,0.9,0.417,1.2352689208194765,0.7872341789805914,0.183,1.7660298619152044,1000
-Linear,Linear,observational,True,4,0.95,0.5238333333333334,1.4719137048778035,0.7872341789805914,0.278,1.9751958823009785,1000
-Linear,Linear,observational,True,6,0.9,0.8873333333333334,1.023987089801951,0.259336442252307,0.88,1.4789961469734276,1000
-Linear,Linear,observational,True,6,0.95,0.9425,1.2201558751251838,0.259336442252307,0.947,1.6504862891485015,1000
diff --git a/results/did/did_multi_eventstudy.csv b/results/did/did_multi_eventstudy.csv
deleted file mode 100644
index 2977684..0000000
--- a/results/did/did_multi_eventstudy.csv
+++ /dev/null
@@ -1,49 +0,0 @@
-Learner g,Learner m,Score,In-sample-norm.,DGP,level,Coverage,CI Length,Bias,Uniform Coverage,Uniform CI Length,repetition
-LGBM,LGBM,experimental,False,1,0.9,0.2695,0.6664379809971152,0.5273245725879285,0.065,0.875601005136764,1000
-LGBM,LGBM,experimental,False,1,0.95,0.35933333333333334,0.7941098340189701,0.5273245725879285,0.123,0.993123730170564,1000
-LGBM,LGBM,experimental,False,4,0.9,0.398,0.5415820487655375,0.3712684690429321,0.2,0.7377555251101078,1000
-LGBM,LGBM,experimental,False,4,0.95,0.4905,0.6453348145154958,0.3712684690429321,0.303,0.8304953131373951,1000
-LGBM,LGBM,experimental,False,6,0.9,0.8975,0.5398952913714347,0.13395236250758816,0.89,0.7349862691616095,1000
-LGBM,LGBM,experimental,False,6,0.95,0.9498333333333334,0.6433249191126899,0.13395236250758816,0.943,0.8272174489329279,1000
-LGBM,LGBM,experimental,True,1,0.9,0.27066666666666667,0.6664609685957759,0.5268935068048141,0.073,0.8756450610322313,1000
-LGBM,LGBM,experimental,True,1,0.95,0.362,0.7941372254322411,0.5268935068048141,0.131,0.9924410930584766,1000
-LGBM,LGBM,experimental,True,4,0.9,0.3908333333333333,0.5414895331777497,0.37122437324957164,0.19,0.7376044296248573,1000
-LGBM,LGBM,experimental,True,4,0.95,0.4928333333333333,0.6452245753932411,0.37122437324957164,0.294,0.8300401173811606,1000
-LGBM,LGBM,experimental,True,6,0.9,0.8971666666666667,0.5397522456658546,0.13239266622447163,0.893,0.7343809360201817,1000
-LGBM,LGBM,experimental,True,6,0.95,0.9475,0.6431544696413897,0.13239266622447163,0.946,0.8273225484496115,1000
-LGBM,LGBM,observational,False,1,0.9,0.902,2.653721378497191,0.7070490800856716,0.933,3.6804836186191254,1000
-LGBM,LGBM,observational,False,1,0.95,0.958,3.162104056941675,0.7070490800856716,0.973,4.1198480194456915,1000
-LGBM,LGBM,observational,False,4,0.9,0.901,3.561000033087922,1.0237738258563158,0.94,4.872767022094824,1000
-LGBM,LGBM,observational,False,4,0.95,0.9645,4.243193254061008,1.0237738258563158,0.99,5.476875359796976,1000
-LGBM,LGBM,observational,False,6,0.9,0.932,2.0204226296120424,0.46987471691349253,0.953,2.8163439845272875,1000
-LGBM,LGBM,observational,False,6,0.95,0.9728333333333333,2.4074820535421066,0.46987471691349253,0.982,3.1491467155171455,1000
-LGBM,LGBM,observational,True,1,0.9,0.9123333333333333,1.07183187495378,0.2603956402406786,0.938,1.4925039393303094,1000
-LGBM,LGBM,observational,True,1,0.95,0.9661666666666666,1.2771664529718216,0.2603956402406786,0.969,1.6679753377448512,1000
-LGBM,LGBM,observational,True,4,0.9,0.936,1.3868740368469075,0.30530138471391266,0.942,1.9070687685924095,1000
-LGBM,LGBM,observational,True,4,0.95,0.9701666666666666,1.6525623427973326,0.30530138471391266,0.973,2.1382550302116794,1000
-LGBM,LGBM,observational,True,6,0.9,0.9173333333333333,0.9435499321525795,0.22352810241642249,0.928,1.3193862165661938,1000
-LGBM,LGBM,observational,True,6,0.95,0.96,1.1243090900810153,0.22352810241642249,0.965,1.4751970915806725,1000
-Linear,Linear,experimental,False,1,0.9,0.8038333333333334,0.21012774921404478,0.06511858439798791,0.723,0.29993875767609723,1000
-Linear,Linear,experimental,False,1,0.95,0.8825,0.25038265646487395,0.06511858439798791,0.819,0.3333237643577245,1000
-Linear,Linear,experimental,False,4,0.9,0.18866666666666665,0.9724995300651125,0.9445552734461886,0.035,1.2537683002940827,1000
-Linear,Linear,experimental,False,4,0.95,0.2515,1.1588046636358738,0.9445552734461886,0.059,1.4271667404908353,1000
-Linear,Linear,experimental,False,6,0.9,0.8888333333333334,0.9839291437722918,0.24625318596473464,0.885,1.265266070206284,1000
-Linear,Linear,experimental,False,6,0.95,0.941,1.172423888384033,0.24625318596473464,0.936,1.4400279208463234,1000
-Linear,Linear,experimental,True,1,0.9,0.8053333333333333,0.21013098816621933,0.06512604019501264,0.732,0.300014012964527,1000
-Linear,Linear,experimental,True,1,0.95,0.8816666666666666,0.2503865159144358,0.06512604019501264,0.824,0.333092537873174,1000
-Linear,Linear,experimental,True,4,0.9,0.18933333333333333,0.972518967504493,0.9443719698591424,0.037,1.2555177892281149,1000
-Linear,Linear,experimental,True,4,0.95,0.252,1.1588278247734445,0.9443719698591424,0.06,1.4274806534380786,1000
-Linear,Linear,experimental,True,6,0.9,0.8875,0.9839063096253385,0.24613101616711022,0.885,1.2651613665652979,1000
-Linear,Linear,experimental,True,6,0.95,0.9415,1.1723966798197494,0.24613101616711022,0.941,1.4397415352658391,1000
-Linear,Linear,observational,False,1,0.9,0.8916666666666666,0.22637212149559552,0.054683601823785656,0.891,0.3228630405513039,1000
-Linear,Linear,observational,False,1,0.95,0.9456666666666667,0.2697390199136439,0.054683601823785656,0.949,0.358552269863231,1000
-Linear,Linear,observational,False,4,0.9,0.3156666666666667,1.2872024456860576,0.9190699365216015,0.185,1.6388325707815383,1000
-Linear,Linear,observational,False,4,0.95,0.4146666666666667,1.5337963165952757,0.9190699365216015,0.262,1.8700830647518032,1000
-Linear,Linear,observational,False,6,0.9,0.8868333333333334,1.0372399263148084,0.26320551491048766,0.877,1.3311336840392576,1000
-Linear,Linear,observational,False,6,0.95,0.9406666666666667,1.2359476038435253,0.26320551491048766,0.943,1.5174947513822885,1000
-Linear,Linear,observational,True,1,0.9,0.8923333333333334,0.22495023196577907,0.05470760964666116,0.891,0.3207319286670861,1000
-Linear,Linear,observational,True,1,0.95,0.9435,0.2680447340375201,0.05470760964666116,0.94,0.35645975459527846,1000
-Linear,Linear,observational,True,4,0.9,0.3105,1.2870401885570502,0.9190767720949804,0.177,1.6393372900395775,1000
-Linear,Linear,observational,True,4,0.95,0.41433333333333333,1.533602975301024,0.9190767720949804,0.257,1.8708048686939287,1000
-Linear,Linear,observational,True,6,0.9,0.8815,1.028838214628254,0.26320130535902825,0.884,1.3218761748802963,1000
-Linear,Linear,observational,True,6,0.95,0.9405,1.225936346887698,0.26320130535902825,0.935,1.5055011269416756,1000
diff --git a/results/did/did_multi_group.csv b/results/did/did_multi_group.csv
deleted file mode 100644
index d6d461b..0000000
--- a/results/did/did_multi_group.csv
+++ /dev/null
@@ -1,49 +0,0 @@
-Learner g,Learner m,Score,In-sample-norm.,DGP,level,Coverage,CI Length,Bias,Uniform Coverage,Uniform CI Length,repetition
-LGBM,LGBM,experimental,False,1,0.9,0.3723333333333333,0.712829716785441,0.5120534147173762,0.073,0.8873091560105003,1000
-LGBM,LGBM,experimental,False,1,0.95,0.4606666666666666,0.849388996757564,0.5120534147173762,0.122,1.0124899542984358,1000
-LGBM,LGBM,experimental,False,4,0.9,0.504,0.608382718738708,0.36202329008148987,0.219,0.7745653409497698,1000
-LGBM,LGBM,experimental,False,4,0.95,0.589,0.7249327222838707,0.36202329008148987,0.31,0.8776439618079263,1000
-LGBM,LGBM,experimental,False,6,0.9,0.8986666666666666,0.6042439878313273,0.14850794162785066,0.899,0.7675396748306148,1000
-LGBM,LGBM,experimental,False,6,0.95,0.9503333333333334,0.7200011202329313,0.14850794162785066,0.945,0.8700300494781059,1000
-LGBM,LGBM,experimental,True,1,0.9,0.3866666666666666,0.7129187295177013,0.5108468478621639,0.076,0.8873896060492763,1000
-LGBM,LGBM,experimental,True,1,0.95,0.4633333333333333,0.8494950619700162,0.5108468478621639,0.135,1.0122432445151552,1000
-LGBM,LGBM,experimental,True,4,0.9,0.5016666666666666,0.608263026948519,0.3616198619719443,0.223,0.7738821674571664,1000
-LGBM,LGBM,experimental,True,4,0.95,0.5966666666666667,0.7247901007191481,0.3616198619719443,0.308,0.877557935434779,1000
-LGBM,LGBM,experimental,True,6,0.9,0.8976666666666666,0.6041013460350053,0.14653383224031008,0.898,0.7676921917774191,1000
-LGBM,LGBM,experimental,True,6,0.95,0.947,0.7198311520491973,0.14653383224031008,0.954,0.8702372370829647,1000
-LGBM,LGBM,observational,False,1,0.9,0.9093333333333333,2.665571501346606,0.6783738614604253,0.937,3.372964798983418,1000
-LGBM,LGBM,observational,False,1,0.95,0.9673333333333334,3.1762243492380025,0.6783738614604253,0.98,3.825961439092819,1000
-LGBM,LGBM,observational,False,4,0.9,0.909,3.534531876327934,0.9946875737404597,0.935,4.445144884485254,1000
-LGBM,LGBM,observational,False,4,0.95,0.967,4.211654500012185,0.9946875737404597,0.976,5.055292173043325,1000
-LGBM,LGBM,observational,False,6,0.9,0.9416666666666667,2.122820536068101,0.4787787714125554,0.957,2.6963338440693168,1000
-LGBM,LGBM,observational,False,6,0.95,0.9766666666666667,2.5294966847881346,0.4787787714125554,0.986,3.0585559742251704,1000
-LGBM,LGBM,observational,True,1,0.9,0.921,1.1147256542022042,0.2672559046096273,0.934,1.4159126956485517,1000
-LGBM,LGBM,observational,True,1,0.95,0.966,1.3282775434118486,0.2672559046096273,0.968,1.6037050855151684,1000
-LGBM,LGBM,observational,True,4,0.9,0.94,1.422767817772445,0.30810115099970276,0.936,1.7975736984737825,1000
-LGBM,LGBM,observational,True,4,0.95,0.9706666666666667,1.6953324207728482,0.30810115099970276,0.972,2.041855425299719,1000
-LGBM,LGBM,observational,True,6,0.9,0.91,1.006741237739991,0.237584782868055,0.927,1.2840461885633785,1000
-LGBM,LGBM,observational,True,6,0.95,0.9603333333333334,1.1996061749145979,0.237584782868055,0.969,1.4521486558429435,1000
-Linear,Linear,experimental,False,1,0.9,0.809,0.2639472488371848,0.07836725122654463,0.751,0.33918155040956693,1000
-Linear,Linear,experimental,False,1,0.95,0.8853333333333334,0.3145125457139394,0.07836725122654463,0.833,0.3826104197586172,1000
-Linear,Linear,experimental,False,4,0.9,0.29933333333333334,1.0775733624029018,0.9144760793156517,0.031,1.3581931890994936,1000
-Linear,Linear,experimental,False,4,0.95,0.376,1.28400785723636,0.9144760793156517,0.066,1.544378342112665,1000
-Linear,Linear,experimental,False,6,0.9,0.8916666666666666,1.0855871936277797,0.2674422239505616,0.886,1.3663373525384621,1000
-Linear,Linear,experimental,False,6,0.95,0.9413333333333334,1.293556926114941,0.2674422239505616,0.947,1.554433869195187,1000
-Linear,Linear,experimental,True,1,0.9,0.809,0.26395101798234016,0.07841196950190694,0.751,0.3392834245796571,1000
-Linear,Linear,experimental,True,1,0.95,0.8856666666666666,0.3145170369274041,0.07841196950190694,0.832,0.38257509241292276,1000
-Linear,Linear,experimental,True,4,0.9,0.29733333333333334,1.0775450811401135,0.9139676817765434,0.033,1.3594859465438789,1000
-Linear,Linear,experimental,True,4,0.95,0.37666666666666665,1.283974158033225,0.9139676817765434,0.065,1.545038224259449,1000
-Linear,Linear,experimental,True,6,0.9,0.8946666666666666,1.0856219017160036,0.2672586672222071,0.896,1.366450981317095,1000
-Linear,Linear,experimental,True,6,0.95,0.942,1.2935982833529203,0.2672586672222071,0.952,1.5542102827957853,1000
-Linear,Linear,observational,False,1,0.9,0.8963333333333334,0.28417672167828534,0.0704315330527404,0.888,0.36504143473823225,1000
-Linear,Linear,observational,False,1,0.95,0.945,0.33861744936319155,0.0704315330527404,0.941,0.41189946472127525,1000
-Linear,Linear,observational,False,4,0.9,0.408,1.3757050815726335,0.8986741240953303,0.193,1.723360009232728,1000
-Linear,Linear,observational,False,4,0.95,0.5103333333333333,1.639253711728994,0.8986741240953303,0.289,1.9608953756229532,1000
-Linear,Linear,observational,False,6,0.9,0.8906666666666666,1.1373073172966048,0.284219716886748,0.88,1.4298799083888758,1000
-Linear,Linear,observational,False,6,0.95,0.9396666666666667,1.35518525462143,0.284219716886748,0.944,1.6242483562467662,1000
-Linear,Linear,observational,True,1,0.9,0.8976666666666666,0.28247229568225285,0.07026934090791014,0.889,0.3628745212929247,1000
-Linear,Linear,observational,True,1,0.95,0.9476666666666667,0.33658650052263794,0.07026934090791014,0.945,0.40971246171295345,1000
-Linear,Linear,observational,True,4,0.9,0.4056666666666666,1.3749682398133307,0.8988135221197031,0.198,1.7216124241871145,1000
-Linear,Linear,observational,True,4,0.95,0.505,1.638375710618819,0.8988135221197031,0.282,1.959504408369747,1000
-Linear,Linear,observational,True,6,0.9,0.8893333333333334,1.1272233554666016,0.2838441687177051,0.879,1.416566966857148,1000
-Linear,Linear,observational,True,6,0.95,0.943,1.3431694729832098,0.2838441687177051,0.945,1.6114267967021605,1000
diff --git a/results/did/did_multi_metadata.csv b/results/did/did_multi_metadata.csv
deleted file mode 100644
index 191c4eb..0000000
--- a/results/did/did_multi_metadata.csv
+++ /dev/null
@@ -1,2 +0,0 @@
-DoubleML Version,Script,Date,Total Runtime (minutes),Python Version,Config File
-0.10.dev0,DIDMultiCoverageSimulation,2025-05-23 09:01,152.65548847913743,3.12.9,scripts/did/did_pa_multi_config.yml
diff --git a/results/did/did_multi_time.csv b/results/did/did_multi_time.csv
deleted file mode 100644
index 333f774..0000000
--- a/results/did/did_multi_time.csv
+++ /dev/null
@@ -1,49 +0,0 @@
-Learner g,Learner m,Score,In-sample-norm.,DGP,level,Coverage,CI Length,Bias,Uniform Coverage,Uniform CI Length,repetition
-LGBM,LGBM,experimental,False,1,0.9,0.105,0.6770474680143006,0.5882187359928296,0.06,0.8037202643084942,1000
-LGBM,LGBM,experimental,False,1,0.95,0.17066666666666666,0.8067518175410349,0.5882187359928296,0.118,0.926841610902776,1000
-LGBM,LGBM,experimental,False,4,0.9,0.236,0.5444213002720629,0.41426795329176225,0.178,0.6607984097007097,1000
-LGBM,LGBM,experimental,False,4,0.95,0.3276666666666666,0.6487179913554645,0.41426795329176225,0.267,0.7587562686524103,1000
-LGBM,LGBM,experimental,False,6,0.9,0.8943333333333334,0.53982673853458,0.1342938994875208,0.908,0.6564456680014384,1000
-LGBM,LGBM,experimental,False,6,0.95,0.95,0.6432432333693072,0.1342938994875208,0.951,0.7533485914407448,1000
-LGBM,LGBM,experimental,True,1,0.9,0.104,0.6773164375641183,0.5871963271333144,0.063,0.8038330391648153,1000
-LGBM,LGBM,experimental,True,1,0.95,0.17566666666666667,0.8070723145274795,0.5871963271333144,0.136,0.9270642436123595,1000
-LGBM,LGBM,experimental,True,4,0.9,0.2313333333333333,0.5443770179339803,0.414059062792942,0.171,0.6603179410869597,1000
-LGBM,LGBM,experimental,True,4,0.95,0.327,0.6486652256951217,0.414059062792942,0.258,0.7584758454668876,1000
-LGBM,LGBM,experimental,True,6,0.9,0.9013333333333333,0.5395668449941098,0.13154818699839163,0.895,0.6563984364887405,1000
-LGBM,LGBM,experimental,True,6,0.95,0.9493333333333334,0.6429335511150388,0.13154818699839163,0.942,0.7529872439559875,1000
-LGBM,LGBM,observational,False,1,0.9,0.8973333333333333,2.8900944626182756,0.7577316621702538,0.915,3.57007703127756,1000
-LGBM,LGBM,observational,False,1,0.95,0.958,3.4437599588413588,0.7577316621702538,0.968,4.083723276817505,1000
-LGBM,LGBM,observational,False,4,0.9,0.8883333333333334,3.944278944663819,1.1508934356199159,0.921,4.817695497037129,1000
-LGBM,LGBM,observational,False,4,0.95,0.959,4.699898246173131,1.1508934356199159,0.981,5.525753606365474,1000
-LGBM,LGBM,observational,False,6,0.9,0.9346666666666666,2.007004968725184,0.4621685573032238,0.943,2.5048024891920218,1000
-LGBM,LGBM,observational,False,6,0.95,0.973,2.391493924468424,0.4621685573032238,0.976,2.8553234091559982,1000
-LGBM,LGBM,observational,True,1,0.9,0.9203333333333333,1.1214028688438904,0.2681238910047797,0.932,1.3877940365894852,1000
-LGBM,LGBM,observational,True,1,0.95,0.966,1.336233935397319,0.2681238910047797,0.965,1.5830862634102072,1000
-LGBM,LGBM,observational,True,4,0.9,0.946,1.4738907100586922,0.3155685506978381,0.947,1.8023292945303246,1000
-LGBM,LGBM,observational,True,4,0.95,0.975,1.756249104193653,0.3155685506978381,0.978,2.0652293790201095,1000
-LGBM,LGBM,observational,True,6,0.9,0.9036666666666666,0.9243181245460811,0.22145829614971912,0.904,1.1513913386950982,1000
-LGBM,LGBM,observational,True,6,0.95,0.9506666666666667,1.101392977881901,0.22145829614971912,0.964,1.312498872874679,1000
-Linear,Linear,experimental,False,1,0.9,0.785,0.24421028738791545,0.07700598814984021,0.726,0.31283981302906455,1000
-Linear,Linear,experimental,False,1,0.95,0.8656666666666666,0.29099450558503215,0.07700598814984021,0.822,0.3536648083533155,1000
-Linear,Linear,experimental,False,4,0.9,0.029,0.9657408497851493,1.0738907262320931,0.024,1.1066254403295672,1000
-Linear,Linear,experimental,False,4,0.95,0.05633333333333333,1.1507511993551036,1.0738907262320931,0.042,1.2857511031369329,1000
-Linear,Linear,experimental,False,6,0.9,0.891,0.9640961231527081,0.2403665468723713,0.887,1.1081712902827776,1000
-Linear,Linear,experimental,False,6,0.95,0.944,1.1487913866938562,0.2403665468723713,0.945,1.2857969535196567,1000
-Linear,Linear,experimental,True,1,0.9,0.7856666666666666,0.24421161220168236,0.07699427764290885,0.725,0.312745321017087,1000
-Linear,Linear,experimental,True,1,0.95,0.866,0.2909960841980021,0.07699427764290885,0.82,0.35353627547158184,1000
-Linear,Linear,experimental,True,4,0.9,0.029333333333333333,0.9658114690308013,1.073411274232068,0.025,1.1072936001699796,1000
-Linear,Linear,experimental,True,4,0.95,0.056,1.1508353473764386,1.073411274232068,0.047,1.2871025490442616,1000
-Linear,Linear,experimental,True,6,0.9,0.8933333333333334,0.964156052938097,0.23997981769673798,0.891,1.1072565217168384,1000
-Linear,Linear,experimental,True,6,0.95,0.944,1.1488627974376686,0.23997981769673798,0.947,1.287096789672995,1000
-Linear,Linear,observational,False,1,0.9,0.889,0.2746570982471215,0.06835594688685424,0.877,0.35201642493170093,1000
-Linear,Linear,observational,False,1,0.95,0.9386666666666666,0.32727411840307125,0.06835594688685424,0.932,0.39803367647348065,1000
-Linear,Linear,observational,False,4,0.9,0.16033333333333336,1.3480635210832488,1.0552310812916714,0.131,1.518276214952089,1000
-Linear,Linear,observational,False,4,0.95,0.24266666666666667,1.6063167608976374,1.0552310812916714,0.211,1.7707505491415911,1000
-Linear,Linear,observational,False,6,0.9,0.8903333333333334,1.0186244057373655,0.2564255464027685,0.88,1.171669804736589,1000
-Linear,Linear,observational,False,6,0.95,0.9423333333333334,1.2137658429333622,0.2564255464027685,0.942,1.359505036566505,1000
-Linear,Linear,observational,True,1,0.9,0.8853333333333334,0.27250071547513943,0.06816094516072957,0.876,0.349158428206522,1000
-Linear,Linear,observational,True,1,0.95,0.9393333333333334,0.3247046298475451,0.06816094516072957,0.927,0.39461799076066073,1000
-Linear,Linear,observational,True,4,0.9,0.15733333333333335,1.348359701155594,1.0540447813311582,0.131,1.518475986344912,1000
-Linear,Linear,observational,True,4,0.95,0.24566666666666664,1.6066696812215027,1.0540447813311582,0.213,1.7697898872953792,1000
-Linear,Linear,observational,True,6,0.9,0.886,1.007087091732094,0.2561924661932343,0.886,1.1588097256348897,1000
-Linear,Linear,observational,True,6,0.95,0.9426666666666667,1.2000182853646246,0.2561924661932343,0.938,1.3446481778510444,1000
diff --git a/results/did/did_pa_atte_coverage_metadata.csv b/results/did/did_pa_atte_coverage_metadata.csv
index 5a18477..1f17571 100644
--- a/results/did/did_pa_atte_coverage_metadata.csv
+++ b/results/did/did_pa_atte_coverage_metadata.csv
@@ -1,2 +1,2 @@
DoubleML Version,Script,Date,Total Runtime (seconds),Python Version
-0.10.dev0,did_pa_atte_coverage.py,2025-05-22 14:30:49,10714.369587659836,3.12.3
+0.11.dev0,did_pa_atte_coverage.py,2025-06-06 08:42:11,11024.603029727936,3.12.3
diff --git a/results/did/did_pa_multi_config.yml b/results/did/did_pa_multi_config.yml
index fa87158..83d9a59 100644
--- a/results/did/did_pa_multi_config.yml
+++ b/results/did/did_pa_multi_config.yml
@@ -1,7 +1,8 @@
-confidence_parameters:
- level:
- - 0.95
- - 0.9
+simulation_parameters:
+ repetitions: 500
+ max_runtime: 19800
+ random_seed: 42
+ n_jobs: -2
dgp_parameters:
DGP:
- 1
@@ -9,115 +10,50 @@ dgp_parameters:
- 6
n_obs:
- 2000
-dml_parameters:
- in_sample_normalization:
- - true
- - false
- learners:
- - ml_g: !!python/tuple
- - Linear
- - !!python/object:sklearn.linear_model._base.LinearRegression
- _sklearn_version: 1.5.2
- copy_X: true
- fit_intercept: true
- n_jobs: null
- positive: false
- ml_m: !!python/tuple
- - Linear
- - !!python/object:sklearn.linear_model._logistic.LogisticRegression
- C: 1.0
- _sklearn_version: 1.5.2
- class_weight: null
- dual: false
- fit_intercept: true
- intercept_scaling: 1
- l1_ratio: null
- max_iter: 100
- multi_class: deprecated
- n_jobs: null
- penalty: l2
- random_state: null
- solver: lbfgs
- tol: 0.0001
- verbose: 0
- warm_start: false
- - ml_g: !!python/tuple
- - LGBM
- - !!python/object:lightgbm.sklearn.LGBMRegressor
- _Booster: null
- _best_iteration: -1
- _best_score: {}
- _class_map: null
- _class_weight: null
- _classes: null
- _evals_result: {}
- _n_classes: -1
- _n_features: -1
- _n_features_in: -1
- _objective: null
- _other_params:
- verbose: -1
- boosting_type: gbdt
- class_weight: null
- colsample_bytree: 1.0
- importance_type: split
- learning_rate: 0.02
- max_depth: -1
+learner_definitions:
+ linear: &id001
+ name: Linear
+ logistic: &id002
+ name: Logistic
+ lgbmr: &id003
+ name: LGBM Regr.
+ params:
+ n_estimators: 300
+ learning_rate: 0.03
+ num_leaves: 7
+ max_depth: 3
min_child_samples: 20
- min_child_weight: 0.001
- min_split_gain: 0.0
- n_estimators: 500
- n_jobs: 1
- num_leaves: 31
- objective: null
- random_state: null
- reg_alpha: 0.0
- reg_lambda: 0.0
- subsample: 1.0
- subsample_for_bin: 200000
- subsample_freq: 0
- verbose: -1
- ml_m: !!python/tuple
- - LGBM
- - !!python/object:lightgbm.sklearn.LGBMClassifier
- _Booster: null
- _best_iteration: -1
- _best_score: {}
- _class_map: null
- _class_weight: null
- _classes: null
- _evals_result: {}
- _n_classes: -1
- _n_features: -1
- _n_features_in: -1
- _objective: null
- _other_params:
- verbose: -1
- boosting_type: gbdt
- class_weight: null
- colsample_bytree: 1.0
- importance_type: split
- learning_rate: 0.02
- max_depth: -1
+ subsample: 0.8
+ colsample_bytree: 0.8
+ reg_alpha: 0.1
+ reg_lambda: 1.0
+ random_state: 42
+ lgbmc: &id004
+ name: LGBM Clas.
+ params:
+ n_estimators: 300
+ learning_rate: 0.03
+ num_leaves: 7
+ max_depth: 3
min_child_samples: 20
- min_child_weight: 0.001
- min_split_gain: 0.0
- n_estimators: 500
- n_jobs: 1
- num_leaves: 31
- objective: null
- random_state: null
- reg_alpha: 0.0
- reg_lambda: 0.0
- subsample: 1.0
- subsample_for_bin: 200000
- subsample_freq: 0
- verbose: -1
+ subsample: 0.8
+ colsample_bytree: 0.8
+ reg_alpha: 0.1
+ reg_lambda: 1.0
+ random_state: 42
+dml_parameters:
+ learners:
+ - ml_g: *id001
+ ml_m: *id002
+ - ml_g: *id003
+ ml_m: *id004
score:
- observational
- experimental
-simulation_parameters:
- max_runtime: 19800
- n_jobs: -2
- random_seed: 42
- repetitions: 1000
+ in_sample_normalization:
+ - true
+ - false
+confidence_parameters:
+ level:
+ - 0.95
+ - 0.9
diff --git a/results/did/did_pa_multi_coverage.csv b/results/did/did_pa_multi_coverage.csv
deleted file mode 100644
index 8276aac..0000000
--- a/results/did/did_pa_multi_coverage.csv
+++ /dev/null
@@ -1,25 +0,0 @@
-Learner g,Learner m,Score,In-sample-norm.,DGP,level,Coverage,CI Length,Bias,Uniform Coverage,Uniform CI Length,repetition
-Linear,Linear,experimental,False,1,0.9,0.8875,0.589658704307476,0.14271881238392356,0.85,0.9184713776325004,20
-Linear,Linear,experimental,False,1,0.95,0.9458333333333334,0.7026216829731943,0.14271881238392356,0.9,1.0088572704968817,20
-Linear,Linear,experimental,False,4,0.9,0.7041666666666667,2.0297642964886093,0.9047675553035617,0.55,2.9260160123442596,20
-Linear,Linear,experimental,False,4,0.95,0.7708333333333333,2.418613336188561,0.9047675553035617,0.65,3.265541044699371,20
-Linear,Linear,experimental,False,6,0.9,0.9375,1.9858835644872976,0.4370880265956636,0.9,2.8747704680291024,20
-Linear,Linear,experimental,False,6,0.95,0.975,2.3663262190076697,0.4370880265956636,0.9,3.2067165503022985,20
-Linear,Linear,experimental,True,1,0.9,0.9041666666666666,0.5893929319394263,0.1419165948760371,0.9,0.9179189354368003,20
-Linear,Linear,experimental,True,1,0.95,0.9458333333333334,0.7023049956638021,0.1419165948760371,0.9,1.0086412447991253,20
-Linear,Linear,experimental,True,4,0.9,0.7041666666666667,2.031873164323966,0.9059623323417508,0.6,2.943810800692373,20
-Linear,Linear,experimental,True,4,0.95,0.7833333333333333,2.4211262072050013,0.9059623323417508,0.6,3.2739360531515684,20
-Linear,Linear,experimental,True,6,0.9,0.9458333333333334,1.9877755956053036,0.4386853538456557,0.9,2.8801526124840118,20
-Linear,Linear,experimental,True,6,0.95,0.975,2.3685807131390373,0.4386853538456557,0.9,3.2111476829248415,20
-Linear,Linear,observational,False,1,0.9,0.9125,0.6827078489949769,0.15467502808332462,0.9,1.0590069349882765,20
-Linear,Linear,observational,False,1,0.95,0.9583333333333334,0.8134965774875249,0.15467502808332462,1.0,1.1738447754301748,20
-Linear,Linear,observational,False,4,0.9,0.8,2.7854153587870214,0.8355183226583197,0.7,4.0015833335633415,20
-Linear,Linear,observational,False,4,0.95,0.8416666666666666,3.3190271132668636,0.8355183226583197,0.8,4.483472762454467,20
-Linear,Linear,observational,False,6,0.9,0.9333333333333333,2.5337718344427866,0.5967437923241619,0.9,3.6412755250672886,20
-Linear,Linear,observational,False,6,0.95,0.9708333333333334,3.0191753595448407,0.5967437923241619,0.95,4.081839476947988,20
-Linear,Linear,observational,True,1,0.9,0.9041666666666666,0.6620805479575324,0.15571526121686724,0.85,1.0317138682400495,20
-Linear,Linear,observational,True,1,0.95,0.9541666666666666,0.7889176323040639,0.15571526121686724,0.95,1.1316182486711308,20
-Linear,Linear,observational,True,4,0.9,0.7916666666666667,2.631108576842307,0.8079463860509168,0.7,3.77565721661986,20
-Linear,Linear,observational,True,4,0.95,0.8416666666666666,3.1351592418487586,0.8079463860509168,0.75,4.24016752287414,20
-Linear,Linear,observational,True,6,0.9,0.9333333333333333,2.3368968698600474,0.5421819294976428,0.9,3.361076606239287,20
-Linear,Linear,observational,True,6,0.95,0.9791666666666666,2.784584369977626,0.5421819294976428,0.95,3.7810902213750666,20
diff --git a/results/did/did_pa_multi_coverage_metadata.csv b/results/did/did_pa_multi_coverage_metadata.csv
deleted file mode 100644
index 0cb88dc..0000000
--- a/results/did/did_pa_multi_coverage_metadata.csv
+++ /dev/null
@@ -1,2 +0,0 @@
-DoubleML Version,Script,Date,Total Runtime (seconds),Python Version
-0.10.dev0,did_pa_multi_coverage.py,2025-03-18 07:37:01,120.99546647071838,3.11.9
diff --git a/results/did/did_pa_multi_detailed.csv b/results/did/did_pa_multi_detailed.csv
new file mode 100644
index 0000000..03365be
--- /dev/null
+++ b/results/did/did_pa_multi_detailed.csv
@@ -0,0 +1,49 @@
+Learner g,Learner m,Score,In-sample-norm.,DGP,level,Coverage,CI Length,Bias,Uniform Coverage,Uniform CI Length,repetition
+LGBM Regr.,LGBM Clas.,experimental,False,1,0.9,0.3673333333333333,0.6622000671680726,0.4760789469976553,0.048,0.9872940132180921,500
+LGBM Regr.,LGBM Clas.,experimental,False,1,0.95,0.455,0.7890600482274513,0.4760789469976553,0.09,1.095657479091583,500
+LGBM Regr.,LGBM Clas.,experimental,False,4,0.9,0.3965,0.6378199657300768,0.4732109643228897,0.06,0.9698764406659528,500
+LGBM Regr.,LGBM Clas.,experimental,False,4,0.95,0.47783333333333333,0.760009365555786,0.4732109643228897,0.11,1.070746913970544,500
+LGBM Regr.,LGBM Clas.,experimental,False,6,0.9,0.9025,0.6328401432350504,0.14980045984980536,0.894,0.9623147617420064,500
+LGBM Regr.,LGBM Clas.,experimental,False,6,0.95,0.9506666666666667,0.7540755410623912,0.14980045984980536,0.954,1.0622145320099126,500
+LGBM Regr.,LGBM Clas.,experimental,True,1,0.9,0.3686666666666667,0.6623492813513598,0.4769578269080368,0.04,0.9884461202448822,500
+LGBM Regr.,LGBM Clas.,experimental,True,1,0.95,0.4578333333333333,0.7892378478932902,0.4769578269080368,0.092,1.097904825887712,500
+LGBM Regr.,LGBM Clas.,experimental,True,4,0.9,0.3965,0.6379252128844956,0.4725155159534973,0.062,0.9695080120086349,500
+LGBM Regr.,LGBM Clas.,experimental,True,4,0.95,0.47883333333333333,0.7601347752753842,0.4725155159534973,0.096,1.071528428469871,500
+LGBM Regr.,LGBM Clas.,experimental,True,6,0.9,0.9001666666666667,0.6328979630581827,0.15061501966635568,0.902,0.9622642309184252,500
+LGBM Regr.,LGBM Clas.,experimental,True,6,0.95,0.9468333333333334,0.754144437631104,0.15061501966635568,0.948,1.0633435621867606,500
+LGBM Regr.,LGBM Clas.,observational,False,1,0.9,0.944,0.8484126499519333,0.18581715397253995,0.958,1.2981680901155388,500
+LGBM Regr.,LGBM Clas.,observational,False,1,0.95,0.9781666666666666,1.0109460262527277,0.18581715397253995,0.984,1.4315482130550377,500
+LGBM Regr.,LGBM Clas.,observational,False,4,0.9,0.9281666666666666,1.1736004812411396,0.24580858947268477,0.918,1.764688942768667,500
+LGBM Regr.,LGBM Clas.,observational,False,4,0.95,0.9648333333333333,1.3984312268166166,0.24580858947268477,0.956,1.9550467611394868,500
+LGBM Regr.,LGBM Clas.,observational,False,6,0.9,0.9358333333333334,0.8012781498847246,0.17217182529588215,0.944,1.222603881202094,500
+LGBM Regr.,LGBM Clas.,observational,False,6,0.95,0.9745,0.9547818052866056,0.17217182529588215,0.974,1.3500149303488194,500
+LGBM Regr.,LGBM Clas.,observational,True,1,0.9,0.9335,0.7612042001195903,0.16720500271357613,0.928,1.1687094154271858,500
+LGBM Regr.,LGBM Clas.,observational,True,1,0.95,0.9728333333333333,0.9070307489184465,0.16720500271357613,0.978,1.287704160787801,500
+LGBM Regr.,LGBM Clas.,observational,True,4,0.9,0.8948333333333334,1.0400365030584322,0.24325349291636317,0.868,1.5686170162912925,500
+LGBM Regr.,LGBM Clas.,observational,True,4,0.95,0.9411666666666666,1.2392799305671278,0.24325349291636317,0.932,1.7357363533233978,500
+LGBM Regr.,LGBM Clas.,observational,True,6,0.9,0.921,0.73959665400471,0.16444852263415236,0.928,1.1315883372939126,500
+LGBM Regr.,LGBM Clas.,observational,True,6,0.95,0.9616666666666667,0.881283769682401,0.16444852263415236,0.966,1.2475888364019896,500
+Linear,Logistic,experimental,False,1,0.9,0.8523333333333334,0.2946189987996892,0.08120823424066553,0.788,0.4588985695573376,500
+Linear,Logistic,experimental,False,1,0.95,0.9148333333333334,0.35106019000539096,0.08120823424066553,0.838,0.5038888198702418,500
+Linear,Logistic,experimental,False,4,0.9,0.303,0.9760460779046628,0.801904091446656,0.044,1.4120588709374915,500
+Linear,Logistic,experimental,False,4,0.95,0.3858333333333333,1.1630306360391751,0.801904091446656,0.084,1.5751591463324877,500
+Linear,Logistic,experimental,False,6,0.9,0.9053333333333333,0.9839860026189414,0.23896735315283404,0.908,1.4202557795653532,500
+Linear,Logistic,experimental,False,6,0.95,0.9548333333333334,1.17249163987864,0.23896735315283404,0.958,1.5845039985572327,500
+Linear,Logistic,experimental,True,1,0.9,0.8496666666666667,0.29463344342253167,0.08121981681344749,0.778,0.45909731515633667,500
+Linear,Logistic,experimental,True,1,0.95,0.9155,0.351077401835111,0.08121981681344749,0.84,0.5042920585797889,500
+Linear,Logistic,experimental,True,4,0.9,0.306,0.9759323896298842,0.8017723312893467,0.038,1.413106206153663,500
+Linear,Logistic,experimental,True,4,0.95,0.385,1.1628951681042905,0.8017723312893467,0.08,1.5751955097078019,500
+Linear,Logistic,experimental,True,6,0.9,0.9053333333333333,0.9838756962077249,0.239302923434742,0.902,1.4196661784164144,500
+Linear,Logistic,experimental,True,6,0.95,0.9531666666666666,1.1723602016827388,0.239302923434742,0.96,1.5824721580536725,500
+Linear,Logistic,observational,False,1,0.9,0.8971666666666667,0.31851350521257993,0.07770855334930929,0.88,0.49576482769471897,500
+Linear,Logistic,observational,False,1,0.95,0.9501666666666666,0.3795322505159817,0.07770855334930929,0.928,0.5439849562862227,500
+Linear,Logistic,observational,False,4,0.9,0.4216666666666667,1.24911253522529,0.7867195413105588,0.202,1.7850915494563109,500
+Linear,Logistic,observational,False,4,0.95,0.5311666666666667,1.4884093888746475,0.7867195413105588,0.298,1.9965811220360494,500
+Linear,Logistic,observational,False,6,0.9,0.8988333333333334,1.0283136039352265,0.25571834558315537,0.908,1.4829065942058384,500
+Linear,Logistic,observational,False,6,0.95,0.953,1.2253112346908492,0.25571834558315537,0.964,1.6547817994899618,500
+Linear,Logistic,observational,True,1,0.9,0.8981666666666667,0.3164160504010737,0.07747519197762118,0.882,0.4924111949311072,500
+Linear,Logistic,observational,True,1,0.95,0.9475,0.3770329789562555,0.07747519197762118,0.93,0.5411929319181428,500
+Linear,Logistic,observational,True,4,0.9,0.4211666666666667,1.2426528075031174,0.7872815865655027,0.18,1.7778308762859318,500
+Linear,Logistic,observational,True,4,0.95,0.5266666666666666,1.480712148537914,0.7872815865655027,0.304,1.9883615181855132,500
+Linear,Logistic,observational,True,6,0.9,0.8986666666666666,1.0213268010132648,0.2545466751950045,0.902,1.47468329239173,500
+Linear,Logistic,observational,True,6,0.95,0.952,1.216985945516332,0.2545466751950045,0.954,1.6464318066970816,500
diff --git a/results/did/did_pa_multi_eventstudy.csv b/results/did/did_pa_multi_eventstudy.csv
new file mode 100644
index 0000000..1b21f54
--- /dev/null
+++ b/results/did/did_pa_multi_eventstudy.csv
@@ -0,0 +1,49 @@
+Learner g,Learner m,Score,In-sample-norm.,DGP,level,Coverage,CI Length,Bias,Uniform Coverage,Uniform CI Length,repetition
+LGBM Regr.,LGBM Clas.,experimental,False,1,0.9,0.2283333333333333,0.6587040995328192,0.5522372239901979,0.044,0.8605047333386023,500
+LGBM Regr.,LGBM Clas.,experimental,False,1,0.95,0.3133333333333333,0.7848943458549469,0.5522372239901979,0.09,0.9778072018001019,500
+LGBM Regr.,LGBM Clas.,experimental,False,4,0.9,0.249,0.6060032559192339,0.5525104645406855,0.048,0.8132771274352789,500
+LGBM Regr.,LGBM Clas.,experimental,False,4,0.95,0.3143333333333333,0.7220974174565562,0.5525104645406855,0.084,0.9185928338492869,500
+LGBM Regr.,LGBM Clas.,experimental,False,6,0.9,0.897,0.6027467480888928,0.1422147849276291,0.906,0.8080325608850351,500
+LGBM Regr.,LGBM Clas.,experimental,False,6,0.95,0.9483333333333334,0.7182170490406318,0.1422147849276291,0.94,0.9125249161446136,500
+LGBM Regr.,LGBM Clas.,experimental,True,1,0.9,0.23933333333333331,0.6589685339305245,0.5529268397062738,0.034,0.8610471093710794,500
+LGBM Regr.,LGBM Clas.,experimental,True,1,0.95,0.316,0.78520943887434,0.5529268397062738,0.08,0.9777737838033697,500
+LGBM Regr.,LGBM Clas.,experimental,True,4,0.9,0.24166666666666667,0.606157835216858,0.5522849609818471,0.046,0.8135265003351018,500
+LGBM Regr.,LGBM Clas.,experimental,True,4,0.95,0.3133333333333333,0.7222816100504348,0.5522849609818471,0.086,0.9194321897734152,500
+LGBM Regr.,LGBM Clas.,experimental,True,6,0.9,0.8986666666666666,0.6028976065833622,0.14209512928525467,0.904,0.8084770787989722,500
+LGBM Regr.,LGBM Clas.,experimental,True,6,0.95,0.9536666666666667,0.7183968080240921,0.14209512928525467,0.944,0.9131416082608758,500
+LGBM Regr.,LGBM Clas.,observational,False,1,0.9,0.9593333333333334,0.8331450685574368,0.17680186027028366,0.972,1.1238811170215592,500
+LGBM Regr.,LGBM Clas.,observational,False,1,0.95,0.9836666666666666,0.992753580934838,0.17680186027028366,0.99,1.2675560428429777,500
+LGBM Regr.,LGBM Clas.,observational,False,4,0.9,0.9306666666666666,1.2212757793823452,0.24645513111977768,0.912,1.6164788843207358,500
+LGBM Regr.,LGBM Clas.,observational,False,4,0.95,0.961,1.4552398484336997,0.24645513111977768,0.954,1.8275978344330392,500
+LGBM Regr.,LGBM Clas.,observational,False,6,0.9,0.949,0.773488793816433,0.15895185287272048,0.964,1.039059805501273,500
+LGBM Regr.,LGBM Clas.,observational,False,6,0.95,0.9773333333333334,0.9216687451607893,0.15895185287272048,0.978,1.172440550055884,500
+LGBM Regr.,LGBM Clas.,observational,True,1,0.9,0.9503333333333334,0.7366626265166267,0.1546838735092071,0.954,0.9998899983927642,500
+LGBM Regr.,LGBM Clas.,observational,True,1,0.95,0.9793333333333334,0.8777876602948735,0.1546838735092071,0.972,1.1253847911000208,500
+LGBM Regr.,LGBM Clas.,observational,True,4,0.9,0.894,1.0659193270745397,0.24400925469037812,0.878,1.4176012914565177,500
+LGBM Regr.,LGBM Clas.,observational,True,4,0.95,0.9386666666666666,1.2701212176327616,0.24400925469037812,0.932,1.6011895710715864,500
+LGBM Regr.,LGBM Clas.,observational,True,6,0.9,0.929,0.7088924236942643,0.1529529479458035,0.922,0.9575952981768475,500
+LGBM Regr.,LGBM Clas.,observational,True,6,0.95,0.9676666666666667,0.8446974226692439,0.1529529479458035,0.964,1.0780717858478737,500
+Linear,Logistic,experimental,False,1,0.9,0.8066666666666666,0.21000404129150896,0.06518921912381406,0.72,0.2997175999884337,500
+Linear,Logistic,experimental,False,1,0.95,0.8743333333333334,0.2502352493832956,0.06518921912381406,0.826,0.33259531990160307,500
+Linear,Logistic,experimental,False,4,0.9,0.18733333333333332,0.9737076191653701,0.9359069321716494,0.046,1.2560167106299942,500
+Linear,Logistic,experimental,False,4,0.95,0.24966666666666665,1.1602441905869794,0.9359069321716494,0.078,1.4300407974756533,500
+Linear,Logistic,experimental,False,6,0.9,0.9033333333333333,0.9849320456939884,0.238799855722236,0.9,1.268040524471122,500
+Linear,Logistic,experimental,False,6,0.95,0.954,1.1736189197317124,0.238799855722236,0.952,1.4422169658618964,500
+Linear,Logistic,experimental,True,1,0.9,0.8056666666666666,0.21001870515867865,0.0651788255038496,0.712,0.29983155338511797,500
+Linear,Logistic,experimental,True,1,0.95,0.875,0.25025272245874486,0.0651788255038496,0.828,0.33303705806938594,500
+Linear,Logistic,experimental,True,4,0.9,0.18733333333333332,0.9735509986703372,0.9357636327849428,0.046,1.2571544126957659,500
+Linear,Logistic,experimental,True,4,0.95,0.24533333333333332,1.1600575657563712,0.9357636327849428,0.07,1.4293605600664439,500
+Linear,Logistic,experimental,True,6,0.9,0.9013333333333333,0.9848219664599364,0.2394113999920333,0.894,1.2656123393584287,500
+Linear,Logistic,experimental,True,6,0.95,0.9516666666666667,1.173487752234099,0.2394113999920333,0.948,1.4411013050595978,500
+Linear,Logistic,observational,False,1,0.9,0.9,0.2262316323769462,0.05514479620759452,0.868,0.3226096040264856,500
+Linear,Logistic,observational,False,1,0.95,0.9456666666666667,0.26957161680356734,0.05514479620759452,0.938,0.35829469003252984,500
+Linear,Logistic,observational,False,4,0.9,0.3143333333333333,1.3036480521122706,0.9181321615807071,0.162,1.6605958923938304,500
+Linear,Logistic,observational,False,4,0.95,0.4136666666666667,1.5533924653170545,0.9181321615807071,0.274,1.8912981546144405,500
+Linear,Logistic,observational,False,6,0.9,0.8973333333333333,1.0343982597104093,0.25858226125551537,0.894,1.327566286989776,500
+Linear,Logistic,observational,False,6,0.95,0.9526666666666667,1.232561549236943,0.25858226125551537,0.95,1.5120978280671449,500
+Linear,Logistic,observational,True,1,0.9,0.8973333333333333,0.2248488438887826,0.054965685359671994,0.864,0.32055024716071057,500
+Linear,Logistic,observational,True,1,0.95,0.944,0.26792392269229237,0.054965685359671994,0.944,0.35608984664695953,500
+Linear,Logistic,observational,True,4,0.9,0.315,1.2959957518029153,0.9185347013069396,0.176,1.6506152551239353,500
+Linear,Logistic,observational,True,4,0.95,0.415,1.5442741870949255,0.9185347013069396,0.27,1.8834083270874074,500
+Linear,Logistic,observational,True,6,0.9,0.8976666666666666,1.0256086410426875,0.2552198325642982,0.898,1.3171396739917884,500
+Linear,Logistic,observational,True,6,0.95,0.9516666666666667,1.2220880726039465,0.2552198325642982,0.95,1.5001030847712007,500
diff --git a/results/did/did_pa_multi_group.csv b/results/did/did_pa_multi_group.csv
new file mode 100644
index 0000000..5f21488
--- /dev/null
+++ b/results/did/did_pa_multi_group.csv
@@ -0,0 +1,49 @@
+Learner g,Learner m,Score,In-sample-norm.,DGP,level,Coverage,CI Length,Bias,Uniform Coverage,Uniform CI Length,repetition
+LGBM Regr.,LGBM Clas.,experimental,False,1,0.9,0.3453333333333333,0.7061203437445056,0.5338039317070226,0.04,0.8782510332132877,500
+LGBM Regr.,LGBM Clas.,experimental,False,1,0.95,0.414,0.8413942856759162,0.5338039317070226,0.082,1.00242141790932,500
+LGBM Regr.,LGBM Clas.,experimental,False,4,0.9,0.38266666666666665,0.6789378060682322,0.5300834738006545,0.07,0.8626680965851735,500
+LGBM Regr.,LGBM Clas.,experimental,False,4,0.95,0.4586666666666666,0.8090042942621264,0.5300834738006545,0.1,0.9769404822124568,500
+LGBM Regr.,LGBM Clas.,experimental,False,6,0.9,0.904,0.671760519005158,0.15787429787372864,0.894,0.8515944211885667,500
+LGBM Regr.,LGBM Clas.,experimental,False,6,0.95,0.9526666666666667,0.8004520292338988,0.15787429787372864,0.94,0.9665782875756674,500
+LGBM Regr.,LGBM Clas.,experimental,True,1,0.9,0.336,0.7063220141736366,0.5352392838724863,0.044,0.8791666491424662,500
+LGBM Regr.,LGBM Clas.,experimental,True,1,0.95,0.422,0.841634590813934,0.5352392838724863,0.068,1.0022365497478951,500
+LGBM Regr.,LGBM Clas.,experimental,True,4,0.9,0.386,0.6790558361796843,0.5286816076588884,0.07,0.8622484845396491,500
+LGBM Regr.,LGBM Clas.,experimental,True,4,0.95,0.4633333333333333,0.8091449358145096,0.5286816076588884,0.104,0.9779355790699585,500
+LGBM Regr.,LGBM Clas.,experimental,True,6,0.9,0.9093333333333333,0.6717901642088375,0.15785991984946784,0.892,0.8520313806974089,500
+LGBM Regr.,LGBM Clas.,experimental,True,6,0.95,0.946,0.8004873536728483,0.15785991984946784,0.95,0.9660175472034508,500
+LGBM Regr.,LGBM Clas.,observational,False,1,0.9,0.9553333333333334,0.8759634723754738,0.18292488179652266,0.97,1.1038974123150518,500
+LGBM Regr.,LGBM Clas.,observational,False,1,0.95,0.988,1.0437748560098636,0.18292488179652266,0.986,1.2543507631992874,500
+LGBM Regr.,LGBM Clas.,observational,False,4,0.9,0.9353333333333333,1.25393645199677,0.250515962227722,0.94,1.5679425502380615,500
+LGBM Regr.,LGBM Clas.,observational,False,4,0.95,0.9713333333333334,1.494157440240192,0.250515962227722,0.972,1.7877154502952064,500
+LGBM Regr.,LGBM Clas.,observational,False,6,0.9,0.9493333333333334,0.8388307882552883,0.1702924604272122,0.956,1.059683094715299,500
+LGBM Regr.,LGBM Clas.,observational,False,6,0.95,0.9773333333333334,0.9995285338251039,0.1702924604272122,0.976,1.2031072528792466,500
+LGBM Regr.,LGBM Clas.,observational,True,1,0.9,0.9513333333333334,0.7861399021677411,0.16271564704380237,0.96,0.9930031807156378,500
+LGBM Regr.,LGBM Clas.,observational,True,1,0.95,0.9786666666666666,0.9367434705508129,0.16271564704380237,0.984,1.1276942507017356,500
+LGBM Regr.,LGBM Clas.,observational,True,4,0.9,0.9046666666666666,1.1057158916792769,0.24870110692650177,0.902,1.38678453123333,500
+LGBM Regr.,LGBM Clas.,observational,True,4,0.95,0.9486666666666667,1.3175417491959676,0.24870110692650177,0.954,1.5778432342279969,500
+LGBM Regr.,LGBM Clas.,observational,True,6,0.9,0.9313333333333333,0.7753781852936138,0.16531074136379498,0.946,0.9820944944614727,500
+LGBM Regr.,LGBM Clas.,observational,True,6,0.95,0.9726666666666667,0.9239200939661141,0.16531074136379498,0.978,1.1150696723460385,500
+Linear,Logistic,experimental,False,1,0.9,0.8273333333333334,0.26380897472313514,0.07651235183723298,0.764,0.3391450778109149,500
+Linear,Logistic,experimental,False,1,0.95,0.89,0.3143477819446344,0.07651235183723298,0.86,0.38221145190836553,500
+Linear,Logistic,experimental,False,4,0.9,0.30266666666666664,1.079363915584256,0.9059563006943179,0.052,1.3590553655028934,500
+Linear,Logistic,experimental,False,4,0.95,0.382,1.2861414329481164,0.9059563006943179,0.082,1.5468325522554665,500
+Linear,Logistic,experimental,False,6,0.9,0.908,1.0862190109211047,0.2644786913841181,0.918,1.3673960998038357,500
+Linear,Logistic,experimental,False,6,0.95,0.9586666666666667,1.2943097828551615,0.2644786913841181,0.96,1.556756668057947,500
+Linear,Logistic,experimental,True,1,0.9,0.828,0.2637980134263197,0.0766285233894392,0.768,0.3386839537931058,500
+Linear,Logistic,experimental,True,1,0.95,0.8906666666666666,0.3143347207538815,0.0766285233894392,0.86,0.3828765812355697,500
+Linear,Logistic,experimental,True,4,0.9,0.304,1.079118204584409,0.9057101368315867,0.052,1.3601845048507253,500
+Linear,Logistic,experimental,True,4,0.95,0.3813333333333333,1.285848650233342,0.9057101368315867,0.08,1.5471926536776797,500
+Linear,Logistic,experimental,True,6,0.9,0.9086666666666666,1.0861705541932378,0.26441199299249185,0.912,1.3670083032848188,500
+Linear,Logistic,experimental,True,6,0.95,0.954,1.2942520431025955,0.26441199299249185,0.96,1.551861252097446,500
+Linear,Logistic,observational,False,1,0.9,0.9033333333333333,0.28400524491310203,0.0687914087907478,0.906,0.364825125167759,500
+Linear,Logistic,observational,False,1,0.95,0.9493333333333334,0.3384131221948419,0.0687914087907478,0.954,0.41166540236207466,500
+Linear,Logistic,observational,False,4,0.9,0.402,1.3928519980132097,0.8965788830797046,0.204,1.7456908682992962,500
+Linear,Logistic,observational,False,4,0.95,0.52,1.6596855228754568,0.8965788830797046,0.32,1.98542725431534,500
+Linear,Logistic,observational,False,6,0.9,0.8973333333333333,1.1329344312687526,0.28201333547394114,0.924,1.422606687890446,500
+Linear,Logistic,observational,False,6,0.95,0.9573333333333334,1.3499746395352878,0.28201333547394114,0.968,1.6193744244245225,500
+Linear,Logistic,observational,True,1,0.9,0.9026666666666666,0.2821845848144727,0.06867681148060421,0.902,0.3621405665817862,500
+Linear,Logistic,observational,True,1,0.95,0.95,0.3362436718784534,0.06867681148060421,0.948,0.4091358145384905,500
+Linear,Logistic,observational,True,4,0.9,0.4033333333333333,1.382242933428948,0.8986180941601967,0.186,1.7319864689480753,500
+Linear,Logistic,observational,True,4,0.95,0.5146666666666666,1.6470440427132675,0.8986180941601967,0.298,1.9706705969890153,500
+Linear,Logistic,observational,True,6,0.9,0.908,1.1253662193074963,0.27896539107985413,0.912,1.413271689719664,500
+Linear,Logistic,observational,True,6,0.95,0.956,1.340956558759967,0.27896539107985413,0.958,1.6086417215587723,500
diff --git a/results/did/did_pa_multi_metadata.csv b/results/did/did_pa_multi_metadata.csv
new file mode 100644
index 0000000..1a3d408
--- /dev/null
+++ b/results/did/did_pa_multi_metadata.csv
@@ -0,0 +1,2 @@
+DoubleML Version,Script,Date,Total Runtime (minutes),Python Version,Config File
+0.11.dev0,DIDMultiCoverageSimulation,2025-06-13 08:54,24.26406863530477,3.12.9,scripts/did/did_pa_multi_config.yml
diff --git a/results/did/did_pa_multi_time.csv b/results/did/did_pa_multi_time.csv
new file mode 100644
index 0000000..5de9bd4
--- /dev/null
+++ b/results/did/did_pa_multi_time.csv
@@ -0,0 +1,49 @@
+Learner g,Learner m,Score,In-sample-norm.,DGP,level,Coverage,CI Length,Bias,Uniform Coverage,Uniform CI Length,repetition
+LGBM Regr.,LGBM Clas.,experimental,False,1,0.9,0.078,0.6692424370363118,0.6128712215451249,0.05,0.790171311463466,500
+LGBM Regr.,LGBM Clas.,experimental,False,1,0.95,0.13466666666666666,0.7974515495023343,0.6128712215451249,0.084,0.9136560537247234,500
+LGBM Regr.,LGBM Clas.,experimental,False,4,0.9,0.074,0.6062091915221344,0.6249192186691614,0.044,0.7249820763436224,500
+LGBM Regr.,LGBM Clas.,experimental,False,4,0.95,0.116,0.722342804862588,0.6249192186691614,0.092,0.8361639582496219,500
+LGBM Regr.,LGBM Clas.,experimental,False,6,0.9,0.9073333333333333,0.599044242636866,0.14008945998182312,0.906,0.7191589886705783,500
+LGBM Regr.,LGBM Clas.,experimental,False,6,0.95,0.956,0.7138052416800065,0.14008945998182312,0.944,0.8277825992127831,500
+LGBM Regr.,LGBM Clas.,experimental,True,1,0.9,0.08266666666666665,0.6696150159796451,0.6156067342388867,0.042,0.791731909415232,500
+LGBM Regr.,LGBM Clas.,experimental,True,1,0.95,0.13466666666666666,0.7978955046958943,0.6156067342388867,0.088,0.915527967488749,500
+LGBM Regr.,LGBM Clas.,experimental,True,4,0.9,0.07266666666666666,0.6063313580429187,0.6229335321737121,0.048,0.7263381757517958,500
+LGBM Regr.,LGBM Clas.,experimental,True,4,0.95,0.11666666666666665,0.7224883752506945,0.6229335321737121,0.08,0.8363885915780758,500
+LGBM Regr.,LGBM Clas.,experimental,True,6,0.9,0.91,0.5991634560721969,0.1412786905584327,0.904,0.7194455787684115,500
+LGBM Regr.,LGBM Clas.,experimental,True,6,0.95,0.954,0.7139472932497593,0.1412786905584327,0.956,0.8287810641013134,500
+LGBM Regr.,LGBM Clas.,observational,False,1,0.9,0.9526666666666667,0.8790712653242123,0.18826349561304626,0.968,1.0635292264065768,500
+LGBM Regr.,LGBM Clas.,observational,False,1,0.95,0.9853333333333334,1.0474780197146027,0.18826349561304626,0.992,1.2228579250647753,500
+LGBM Regr.,LGBM Clas.,observational,False,4,0.9,0.9353333333333333,1.3269252174372335,0.26178211241085814,0.928,1.5701620021658818,500
+LGBM Regr.,LGBM Clas.,observational,False,4,0.95,0.9666666666666667,1.5811289185500799,0.26178211241085814,0.96,1.8101418808788674,500
+LGBM Regr.,LGBM Clas.,observational,False,6,0.9,0.9533333333333334,0.7694077871880038,0.155760065723021,0.968,0.9317467276936701,500
+LGBM Regr.,LGBM Clas.,observational,False,6,0.95,0.9853333333333334,0.9168059258306492,0.155760065723021,0.986,1.070456368054135,500
+LGBM Regr.,LGBM Clas.,observational,True,1,0.9,0.9573333333333334,0.7713377451893,0.16339158562782916,0.96,0.9369168763484825,500
+LGBM Regr.,LGBM Clas.,observational,True,1,0.95,0.9826666666666666,0.9191056126308821,0.16339158562782916,0.984,1.074393848207727,500
+LGBM Regr.,LGBM Clas.,observational,True,4,0.9,0.89,1.1451904937521304,0.26297336131398186,0.868,1.3572001851162259,500
+LGBM Regr.,LGBM Clas.,observational,True,4,0.95,0.9386666666666666,1.364578638739894,0.26297336131398186,0.92,1.567125347231284,500
+LGBM Regr.,LGBM Clas.,observational,True,6,0.9,0.946,0.7018901783436629,0.14653434182967903,0.946,0.8487679999150168,500
+LGBM Regr.,LGBM Clas.,observational,True,6,0.95,0.978,0.8363537327060658,0.14653434182967903,0.976,0.9760935044639452,500
+Linear,Logistic,experimental,False,1,0.9,0.7953333333333333,0.24405181357972996,0.07605161668436612,0.726,0.31285765298747464,500
+Linear,Logistic,experimental,False,1,0.95,0.8626666666666666,0.29080567239559374,0.07605161668436612,0.82,0.3535441044205644,500
+Linear,Logistic,experimental,False,4,0.9,0.04,0.9668363884575001,1.0600853196453421,0.032,1.107680731461651,500
+Linear,Logistic,experimental,False,4,0.95,0.06933333333333333,1.1520566141994981,1.0600853196453421,0.058,1.2882352455188046,500
+Linear,Logistic,experimental,False,6,0.9,0.9126666666666666,0.9650429626258434,0.23526932641417123,0.914,1.1100456562874579,500
+Linear,Logistic,experimental,False,6,0.95,0.9513333333333334,1.149919615513782,0.23526932641417123,0.948,1.2887467043073448,500
+Linear,Logistic,experimental,True,1,0.9,0.7973333333333333,0.24405353188120116,0.07602557863080843,0.732,0.3126886691789105,500
+Linear,Logistic,experimental,True,1,0.95,0.8593333333333334,0.29080771987808274,0.07602557863080843,0.818,0.3535552665568589,500
+Linear,Logistic,experimental,True,4,0.9,0.041333333333333326,0.9667071224747424,1.0601977183113478,0.034,1.10968249152393,500
+Linear,Logistic,experimental,True,4,0.95,0.07066666666666667,1.1519025842806776,1.0601977183113478,0.054,1.2887777719835505,500
+Linear,Logistic,experimental,True,6,0.9,0.9093333333333333,0.9649936655742097,0.23538050665454452,0.922,1.1093549725922547,500
+Linear,Logistic,experimental,True,6,0.95,0.9513333333333334,1.1498608744536882,0.23538050665454452,0.948,1.287941663582221,500
+Linear,Logistic,observational,False,1,0.9,0.89,0.2744673956918025,0.06807514734567709,0.884,0.35184020083028195,500
+Linear,Logistic,observational,False,1,0.95,0.9433333333333334,0.3270480738662758,0.06807514734567709,0.932,0.39746810181615344,500
+Linear,Logistic,observational,False,4,0.9,0.18066666666666667,1.372715206976075,1.051508566974799,0.156,1.544299564377621,500
+Linear,Logistic,observational,False,4,0.95,0.25533333333333336,1.6356910564072515,1.051508566974799,0.216,1.8014374703838525,500
+Linear,Logistic,observational,False,6,0.9,0.8986666666666666,1.0120400522734778,0.2513420400041841,0.902,1.1637402258830514,500
+Linear,Logistic,observational,False,6,0.95,0.95,1.2059201018660433,0.2513420400041841,0.952,1.354296135095299,500
+Linear,Logistic,observational,True,1,0.9,0.8886666666666666,0.2719876029928796,0.06788546810358119,0.886,0.34872586132705413,500
+Linear,Logistic,observational,True,1,0.95,0.942,0.3240932186138835,0.06788546810358119,0.94,0.3941489528436907,500
+Linear,Logistic,observational,True,4,0.9,0.17666666666666667,1.3599704742520904,1.0529815437473877,0.146,1.531580058039442,500
+Linear,Logistic,observational,True,4,0.95,0.252,1.6205047706962885,1.0529815437473877,0.21,1.7831373237480377,500
+Linear,Logistic,observational,True,6,0.9,0.904,1.0055529300121109,0.2509111677982569,0.904,1.1587145925499964,500
+Linear,Logistic,observational,True,6,0.95,0.9533333333333334,1.1981902189225062,0.2509111677982569,0.952,1.34594676137346,500
diff --git a/results/irm/apo_config.yml b/results/irm/apo_config.yml
new file mode 100644
index 0000000..5f31101
--- /dev/null
+++ b/results/irm/apo_config.yml
@@ -0,0 +1,49 @@
+simulation_parameters:
+ repetitions: 1000
+ max_runtime: 19800
+ random_seed: 42
+ n_jobs: -2
+dgp_parameters:
+ n_obs:
+ - 500
+ n_levels:
+ - 2
+ linear:
+ - true
+learner_definitions:
+ linear: &id001
+ name: Linear
+ logit: &id002
+ name: Logistic
+ lgbmr: &id003
+ name: LGBM Regr.
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+ min_child_samples: 10
+ lgbmc: &id004
+ name: LGBM Clas.
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+ min_child_samples: 10
+dml_parameters:
+ treatment_level:
+ - 0
+ - 1
+ - 2
+ trimming_threshold:
+ - 0.01
+ learners:
+ - ml_g: *id001
+ ml_m: *id002
+ - ml_g: *id003
+ ml_m: *id004
+ - ml_g: *id003
+ ml_m: *id002
+ - ml_g: *id001
+ ml_m: *id004
+confidence_parameters:
+ level:
+ - 0.95
+ - 0.9
diff --git a/results/irm/apo_coverage.csv b/results/irm/apo_coverage.csv
new file mode 100644
index 0000000..cb1bc37
--- /dev/null
+++ b/results/irm/apo_coverage.csv
@@ -0,0 +1,25 @@
+Learner g,Learner m,Treatment Level,level,Coverage,CI Length,Bias,repetition
+LGBM Regr.,LGBM Clas.,0,0.9,0.921,8.43473750706522,2.0020174052425626,1000
+LGBM Regr.,LGBM Clas.,0,0.95,0.968,10.050609648188914,2.0020174052425626,1000
+LGBM Regr.,LGBM Clas.,1,0.9,0.945,34.49785903442266,8.012907711983175,1000
+LGBM Regr.,LGBM Clas.,1,0.95,0.983,41.10673444938877,8.012907711983175,1000
+LGBM Regr.,LGBM Clas.,2,0.9,0.909,33.42971449937157,8.333206392907996,1000
+LGBM Regr.,LGBM Clas.,2,0.95,0.973,39.83396173291094,8.333206392907996,1000
+LGBM Regr.,Logistic,0,0.9,0.905,5.626372580880361,1.390211734015615,1000
+LGBM Regr.,Logistic,0,0.95,0.958,6.704236438695905,1.390211734015615,1000
+LGBM Regr.,Logistic,1,0.9,0.922,7.220302724612175,1.6901658245391882,1000
+LGBM Regr.,Logistic,1,0.95,0.952,8.603521350373505,1.6901658245391882,1000
+LGBM Regr.,Logistic,2,0.9,0.91,7.160030685666201,1.6407106001549503,1000
+LGBM Regr.,Logistic,2,0.95,0.957,8.531702786293828,1.6407106001549503,1000
+Linear,LGBM Clas.,0,0.9,0.902,5.4602785529816185,1.3727521781733092,1000
+Linear,LGBM Clas.,0,0.95,0.95,6.506323197423445,1.3727521781733092,1000
+Linear,LGBM Clas.,1,0.9,0.946,9.92791552556521,2.067430586356608,1000
+Linear,LGBM Clas.,1,0.95,0.979,11.82984099790536,2.067430586356608,1000
+Linear,LGBM Clas.,2,0.9,0.93,7.18671690478672,1.5695466395151154,1000
+Linear,LGBM Clas.,2,0.95,0.971,8.563501377671649,1.5695466395151154,1000
+Linear,Logistic,0,0.9,0.897,5.347321418519404,1.344857338767798,1000
+Linear,Logistic,0,0.95,0.949,6.371726469960765,1.344857338767798,1000
+Linear,Logistic,1,0.9,0.901,5.430231052306794,1.3595988005998974,1000
+Linear,Logistic,1,0.95,0.947,6.470519392037281,1.3595988005998974,1000
+Linear,Logistic,2,0.9,0.898,5.376486330054867,1.3454318063315418,1000
+Linear,Logistic,2,0.95,0.947,6.406478605521755,1.3454318063315418,1000
diff --git a/results/irm/apo_metadata.csv b/results/irm/apo_metadata.csv
new file mode 100644
index 0000000..443288c
--- /dev/null
+++ b/results/irm/apo_metadata.csv
@@ -0,0 +1,2 @@
+DoubleML Version,Script,Date,Total Runtime (minutes),Python Version,Config File
+0.11.dev0,APOCoverageSimulation,2025-06-05 13:49,73.86938846111298,3.12.3,scripts/irm/apo_config.yml
diff --git a/results/irm/apos_causal_contrast.csv b/results/irm/apos_causal_contrast.csv
new file mode 100644
index 0000000..aa9f305
--- /dev/null
+++ b/results/irm/apos_causal_contrast.csv
@@ -0,0 +1,9 @@
+Learner g,Learner m,level,Coverage,CI Length,Bias,Uniform Coverage,Uniform CI Length,repetition
+LGBM Regr.,LGBM Clas.,0.9,0.904,33.69662163571549,8.483004437185645,0.926,39.85455370375964,1000
+LGBM Regr.,LGBM Clas.,0.95,0.962,40.152001202125206,8.483004437185645,0.971,45.73641635076257,1000
+LGBM Regr.,Logistic,0.9,0.94,5.358652779061842,1.1112961153926972,0.938,6.338924854198347,1000
+LGBM Regr.,Logistic,0.95,0.9705,6.385228618842048,1.1112961153926972,0.971,7.278594548770065,1000
+Linear,LGBM Clas.,0.9,0.961,6.6486166061347785,1.2777994119295877,0.974,7.879408694900085,1000
+Linear,LGBM Clas.,0.95,0.987,7.922315324306693,1.2777994119295877,0.992,9.038546968123926,1000
+Linear,Logistic,0.9,0.863,1.1418873926566593,0.3053856763981124,0.855,1.3481716926442315,1000
+Linear,Logistic,0.95,0.9275,1.3606427510242092,0.3053856763981124,0.921,1.5482567665033886,1000
diff --git a/results/irm/apos_config.yml b/results/irm/apos_config.yml
new file mode 100644
index 0000000..40be90e
--- /dev/null
+++ b/results/irm/apos_config.yml
@@ -0,0 +1,49 @@
+simulation_parameters:
+ repetitions: 1000
+ max_runtime: 19800
+ random_seed: 42
+ n_jobs: -2
+dgp_parameters:
+ n_obs:
+ - 500
+ n_levels:
+ - 2
+ linear:
+ - true
+learner_definitions:
+ linear: &id001
+ name: Linear
+ logit: &id002
+ name: Logistic
+ lgbmr: &id003
+ name: LGBM Regr.
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+ min_child_samples: 10
+ lgbmc: &id004
+ name: LGBM Clas.
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+ min_child_samples: 10
+dml_parameters:
+ treatment_levels:
+ - - 0
+ - 1
+ - 2
+ trimming_threshold:
+ - 0.01
+ learners:
+ - ml_g: *id001
+ ml_m: *id002
+ - ml_g: *id003
+ ml_m: *id004
+ - ml_g: *id003
+ ml_m: *id002
+ - ml_g: *id001
+ ml_m: *id004
+confidence_parameters:
+ level:
+ - 0.95
+ - 0.9
diff --git a/results/irm/apos_coverage.csv b/results/irm/apos_coverage.csv
new file mode 100644
index 0000000..d672bd2
--- /dev/null
+++ b/results/irm/apos_coverage.csv
@@ -0,0 +1,9 @@
+Learner g,Learner m,level,Coverage,CI Length,Bias,Uniform Coverage,Uniform CI Length,repetition
+LGBM Regr.,LGBM Clas.,0.9,0.9203333333333333,25.45613129678526,6.166381695751374,0.946,32.623943884889655,1000
+LGBM Regr.,LGBM Clas.,0.95,0.9643333333333334,30.3328513309069,6.166381695751374,0.986,36.86913655114933,1000
+LGBM Regr.,Logistic,0.9,0.917,6.627738366241373,1.5016545559757806,0.92,8.149255859131907,1000
+LGBM Regr.,Logistic,0.95,0.9596666666666667,7.897437367033678,1.5016545559757806,0.959,9.320000315514479,1000
+Linear,LGBM Clas.,0.9,0.9376666666666666,7.5134083953879935,1.6006918303560986,0.951,9.29091412660083,1000
+Linear,LGBM Clas.,0.95,0.974,8.952778298816861,1.6006918303560986,0.974,10.609555760885549,1000
+Linear,Logistic,0.9,0.915,5.39079994210578,1.2559293498489636,0.914,5.8155441975859725,1000
+Linear,Logistic,0.95,0.96,6.423534326255072,1.2559293498489636,0.959,6.833558835632949,1000
diff --git a/results/irm/apos_metadata.csv b/results/irm/apos_metadata.csv
new file mode 100644
index 0000000..10c6d4c
--- /dev/null
+++ b/results/irm/apos_metadata.csv
@@ -0,0 +1,2 @@
+DoubleML Version,Script,Date,Total Runtime (minutes),Python Version,Config File
+0.11.dev0,APOSCoverageSimulation,2025-06-05 13:49,73.850344034036,3.12.3,scripts/irm/apos_config.yml
diff --git a/results/irm/cvar_Y0_coverage.csv b/results/irm/cvar_Y0_coverage.csv
new file mode 100644
index 0000000..4c25ee7
--- /dev/null
+++ b/results/irm/cvar_Y0_coverage.csv
@@ -0,0 +1,9 @@
+Learner g,Learner m,level,Coverage,CI Length,Bias,repetition
+LGBM Regr.,LGBM Clas.,0.9,0.8564285714285714,0.5599803695184898,0.15691823669038846,200
+LGBM Regr.,LGBM Clas.,0.95,0.9242857142857143,0.6672577658717421,0.15691823669038846,200
+LGBM Regr.,Logistic,0.9,0.8,0.4488498613139841,0.13502164231417138,200
+LGBM Regr.,Logistic,0.95,0.8842857142857143,0.5348375978424744,0.13502164231417138,200
+Linear,LGBM Clas.,0.9,0.7778571428571429,0.5748146502742429,0.16876670012237052,200
+Linear,LGBM Clas.,0.95,0.8607142857142857,0.6849339016332675,0.16876670012237052,200
+Linear,Logistic,0.9,0.7521428571428571,0.4599365576395126,0.14286782087753735,200
+Linear,Logistic,0.95,0.832857142857143,0.5480482113277858,0.14286782087753735,200
diff --git a/results/irm/cvar_Y1_coverage.csv b/results/irm/cvar_Y1_coverage.csv
new file mode 100644
index 0000000..8fddf73
--- /dev/null
+++ b/results/irm/cvar_Y1_coverage.csv
@@ -0,0 +1,9 @@
+Learner g,Learner m,level,Coverage,CI Length,Bias,repetition
+LGBM Regr.,LGBM Clas.,0.9,0.9014285714285714,0.19064863373047444,0.046573467818208994,200
+LGBM Regr.,LGBM Clas.,0.95,0.9535714285714286,0.22717185875440954,0.046573467818208994,200
+LGBM Regr.,Logistic,0.9,0.8921428571428571,0.18035991253115108,0.044703609418269445,200
+LGBM Regr.,Logistic,0.95,0.942857142857143,0.2149120912789158,0.044703609418269445,200
+Linear,LGBM Clas.,0.9,0.9064285714285714,0.21197545188306893,0.04818749120158227,200
+Linear,LGBM Clas.,0.95,0.957857142857143,0.25258432999137354,0.04818749120158227,200
+Linear,Logistic,0.9,0.9007142857142857,0.1965222149886573,0.04731821601748326,200
+Linear,Logistic,0.95,0.9457142857142857,0.23417066250063942,0.04731821601748326,200
diff --git a/results/irm/cvar_config.yml b/results/irm/cvar_config.yml
new file mode 100644
index 0000000..5157d7e
--- /dev/null
+++ b/results/irm/cvar_config.yml
@@ -0,0 +1,65 @@
+simulation_parameters:
+ repetitions: 200
+ max_runtime: 19800
+ random_seed: 42
+ n_jobs: -2
+dgp_parameters:
+ n_obs:
+ - 5000
+ dim_x:
+ - 5
+learner_definitions:
+ linear: &id001
+ name: Linear
+ logit: &id002
+ name: Logistic
+ lgbmr: &id003
+ name: LGBM Regr.
+ params:
+ n_estimators: 200
+ learning_rate: 0.05
+ num_leaves: 15
+ max_depth: 5
+ min_child_samples: 10
+ subsample: 0.9
+ colsample_bytree: 0.9
+ reg_alpha: 0.0
+ reg_lambda: 0.1
+ random_state: 42
+ lgbmc: &id004
+ name: LGBM Clas.
+ params:
+ n_estimators: 200
+ learning_rate: 0.05
+ num_leaves: 15
+ max_depth: 5
+ min_child_samples: 10
+ subsample: 0.9
+ colsample_bytree: 0.9
+ reg_alpha: 0.0
+ reg_lambda: 0.1
+ random_state: 42
+dml_parameters:
+ tau_vec:
+ - - 0.2
+ - 0.3
+ - 0.4
+ - 0.5
+ - 0.6
+ - 0.7
+ - 0.8
+ trimming_threshold:
+ - 0.01
+ learners:
+ - ml_g: *id001
+ ml_m: *id002
+ - ml_g: *id003
+ ml_m: *id004
+ - ml_g: *id003
+ ml_m: *id002
+ - ml_g: *id001
+ ml_m: *id004
+confidence_parameters:
+ level:
+ - 0.95
+ - 0.9
diff --git a/results/irm/cvar_coverage_metadata.csv b/results/irm/cvar_coverage_metadata.csv
deleted file mode 100644
index f14d35e..0000000
--- a/results/irm/cvar_coverage_metadata.csv
+++ /dev/null
@@ -1,2 +0,0 @@
-DoubleML Version,Script,Date,Total Runtime (seconds),Python Version
-0.10.dev0,cvar_coverage.py,2025-05-22 15:47:04,15261.781089544296,3.12.10
diff --git a/results/irm/cvar_coverage_pq0.csv b/results/irm/cvar_coverage_pq0.csv
deleted file mode 100644
index f94d6bb..0000000
--- a/results/irm/cvar_coverage_pq0.csv
+++ /dev/null
@@ -1,9 +0,0 @@
-Learner g,Learner m,level,Coverage,CI Length,Bias,repetition
-LGBM,LGBM,0.9,0.8892307692307692,0.5699689541680535,0.13916120902511853,100
-LGBM,LGBM,0.95,0.9461538461538461,0.6791598985897508,0.13916120902511853,100
-LGBM,Logistic Regression,0.9,0.8207692307692308,0.4060171336908242,0.11838313391016886,100
-LGBM,Logistic Regression,0.95,0.8907692307692308,0.4837992549009209,0.11838313391016886,100
-Linear,LGBM,0.9,0.7707692307692308,0.5801661718639771,0.1746635044255932,100
-Linear,LGBM,0.95,0.8630769230769231,0.691310632915921,0.1746635044255932,100
-Linear,Logistic Regression,0.9,0.69,0.4294697114538,0.1539651338207486,100
-Linear,Logistic Regression,0.95,0.7792307692307692,0.5117447249457237,0.1539651338207486,100
diff --git a/results/irm/cvar_coverage_pq1.csv b/results/irm/cvar_coverage_pq1.csv
deleted file mode 100644
index 321647f..0000000
--- a/results/irm/cvar_coverage_pq1.csv
+++ /dev/null
@@ -1,9 +0,0 @@
-Learner g,Learner m,level,Coverage,CI Length,Bias,repetition
-LGBM,LGBM,0.9,0.9323076923076923,0.1908311409902022,0.04341246693737058,100
-LGBM,LGBM,0.95,0.9792307692307692,0.22738932956769187,0.04341246693737058,100
-LGBM,Logistic Regression,0.9,0.9130769230769231,0.1776451248637835,0.04403267398281018,100
-LGBM,Logistic Regression,0.95,0.963076923076923,0.21167722225073635,0.04403267398281018,100
-Linear,LGBM,0.9,0.9307692307692308,0.21584587388672186,0.047576051617349686,100
-Linear,LGBM,0.95,0.9776923076923077,0.25719622226423833,0.047576051617349686,100
-Linear,Logistic Regression,0.9,0.8884615384615384,0.1934218436107483,0.04906705213284509,100
-Linear,Logistic Regression,0.95,0.943076923076923,0.23047634214298993,0.04906705213284509,100
diff --git a/results/irm/cvar_coverage_qte.csv b/results/irm/cvar_coverage_qte.csv
deleted file mode 100644
index 7abdfa2..0000000
--- a/results/irm/cvar_coverage_qte.csv
+++ /dev/null
@@ -1,9 +0,0 @@
-Learner g,Learner m,level,Coverage,CI Length,Bias,Uniform Coverage,Uniform CI Length,repetition
-LGBM,LGBM,0.9,0.9007692307692308,0.5818966962059838,0.14389616733672375,0.88,0.7055777033989438,100
-LGBM,LGBM,0.95,0.9492307692307692,0.693372679853793,0.14389616733672375,0.95,0.8113769406399532,100
-LGBM,Logistic Regression,0.9,0.81,0.41715844354328135,0.12275759703405408,0.78,0.5062794648957274,100
-LGBM,Logistic Regression,0.95,0.8692307692307693,0.49707494441737266,0.12275759703405408,0.86,0.5835121707560351,100
-Linear,LGBM,0.9,0.8007692307692308,0.6065907561805581,0.1798354817301676,0.8,0.7182897195756133,100
-Linear,LGBM,0.95,0.8638461538461538,0.7227974671960807,0.1798354817301676,0.85,0.8301176550726784,100
-Linear,Logistic Regression,0.9,0.7084615384615384,0.45417244635105875,0.15263121995044057,0.69,0.5341807534621331,100
-Linear,Logistic Regression,0.95,0.8123076923076923,0.5411798490959508,0.15263121995044057,0.8,0.6185753871641957,100
diff --git a/results/irm/cvar_effect_coverage.csv b/results/irm/cvar_effect_coverage.csv
new file mode 100644
index 0000000..b17f3f4
--- /dev/null
+++ b/results/irm/cvar_effect_coverage.csv
@@ -0,0 +1,9 @@
+Learner g,Learner m,level,Coverage,CI Length,Bias,Uniform Coverage,Uniform CI Length,repetition
+LGBM Regr.,LGBM Clas.,0.9,0.8357142857142857,0.572356196179233,0.16148728231793735,0.8,0.6933257488655766,200
+LGBM Regr.,LGBM Clas.,0.95,0.912142857142857,0.6820044728957118,0.16148728231793735,0.89,0.7996943722331994,200
+LGBM Regr.,Logistic,0.9,0.812142857142857,0.4603475364424448,0.13621838163720854,0.785,0.5540435315601864,200
+LGBM Regr.,Logistic,0.95,0.885,0.5485379227762442,0.13621838163720854,0.86,0.6395859491088156,200
+Linear,LGBM Clas.,0.9,0.7835714285714286,0.6002467096290228,0.17327648690690606,0.75,0.7089505880560413,200
+Linear,LGBM Clas.,0.95,0.8592857142857143,0.7152380694761148,0.17327648690690606,0.815,0.8220830080641385,200
+Linear,Logistic,0.9,0.7742857142857144,0.48428688399508923,0.14834465693639912,0.755,0.5678723651822354,200
+Linear,Logistic,0.95,0.85,0.5770634148004378,0.14834465693639912,0.82,0.6581721908143731,200
diff --git a/results/irm/cvar_metadata.csv b/results/irm/cvar_metadata.csv
new file mode 100644
index 0000000..6db12ae
--- /dev/null
+++ b/results/irm/cvar_metadata.csv
@@ -0,0 +1,2 @@
+DoubleML Version,Script,Date,Total Runtime (minutes),Python Version,Config File
+0.11.dev0,CVARCoverageSimulation,2025-06-05 14:11,94.70462875763575,3.12.3,scripts/irm/cvar_config.yml
diff --git a/results/irm/iivm_late_config.yml b/results/irm/iivm_late_config.yml
new file mode 100644
index 0000000..d549111
--- /dev/null
+++ b/results/irm/iivm_late_config.yml
@@ -0,0 +1,75 @@
+simulation_parameters:
+ repetitions: 1000
+ max_runtime: 19800
+ random_seed: 42
+ n_jobs: -2
+dgp_parameters:
+ theta:
+ - 0.5
+ n_obs:
+ - 500
+ dim_x:
+ - 20
+ alpha_x:
+ - 1.0
+learner_definitions:
+ lasso: &id001
+ name: LassoCV
+ logit: &id002
+ name: Logistic
+ lgbmr: &id004
+ name: LGBM Regr.
+ params:
+ n_estimators: 100
+ learning_rate: 0.05
+ num_leaves: 7
+ max_depth: 3
+ min_child_samples: 20
+ subsample: 1.0
+ colsample_bytree: 0.8
+ reg_alpha: 0.1
+ reg_lambda: 1.0
+ random_state: 42
+ lgbmc: &id003
+ name: LGBM Clas.
+ params:
+ n_estimators: 100
+ learning_rate: 0.05
+ num_leaves: 7
+ max_depth: 3
+ min_child_samples: 20
+ subsample: 1.0
+ colsample_bytree: 0.8
+ reg_alpha: 0.1
+ reg_lambda: 1.0
+ random_state: 42
+dml_parameters:
+ learners:
+ - ml_g: *id001
+ ml_m: *id002
+ ml_r: *id002
+ - ml_g: *id001
+ ml_m: *id002
+ ml_r: *id003
+ - ml_g: *id001
+ ml_m: *id003
+ ml_r: *id002
+ - ml_g: *id001
+ ml_m: *id003
+ ml_r: *id003
+ - ml_g: *id004
+ ml_m: *id002
+ ml_r: *id002
+ - ml_g: *id004
+ ml_m: *id002
+ ml_r: *id003
+ - ml_g: *id004
+ ml_m: *id003
+ ml_r: *id002
+ - ml_g: *id004
+ ml_m: *id003
+ ml_r: *id003
+confidence_parameters:
+ level:
+ - 0.95
+ - 0.9
diff --git a/results/irm/iivm_late_coverage.csv b/results/irm/iivm_late_coverage.csv
index c234ac0..dcd3993 100644
--- a/results/irm/iivm_late_coverage.csv
+++ b/results/irm/iivm_late_coverage.csv
@@ -1,9 +1,17 @@
-Learner g,Learner m,level,Coverage,CI Length,Bias,repetition
-Lasso,Logistic Regression,0.9,0.902,0.9293147641314788,0.2241721725234453,1000
-Lasso,Logistic Regression,0.95,0.956,1.10734684117444,0.2241721725234453,1000
-Lasso,Random Forest,0.9,0.903,0.9550141700093412,0.23077333095440236,1000
-Lasso,Random Forest,0.95,0.957,1.1379695720480933,0.23077333095440236,1000
-Random Forest,Logistic Regression,0.9,0.9,0.9629145290806888,0.23355881870984344,1000
-Random Forest,Logistic Regression,0.95,0.951,1.1473834305161408,0.23355881870984344,1000
-Random Forest,Random Forest,0.9,0.904,0.9912410494922755,0.23401096266112748,1000
-Random Forest,Random Forest,0.95,0.959,1.1811365614357268,0.23401096266112748,1000
+Learner g,Learner m,Learner r,level,Coverage,CI Length,Bias,repetition
+LGBM Regr.,LGBM Clas.,LGBM Clas.,0.9,0.914,1.1170031612339868,0.26443161772788826,1000
+LGBM Regr.,LGBM Clas.,LGBM Clas.,0.95,0.968,1.3309913604249184,0.26443161772788826,1000
+LGBM Regr.,LGBM Clas.,Logistic,0.9,0.921,1.111120207889174,0.2669195093646889,1000
+LGBM Regr.,LGBM Clas.,Logistic,0.95,0.969,1.32398138914867,0.2669195093646889,1000
+LGBM Regr.,Logistic,LGBM Clas.,0.9,0.923,1.0567499407213161,0.25374822037756817,1000
+LGBM Regr.,Logistic,LGBM Clas.,0.95,0.965,1.2591952198915768,0.25374822037756817,1000
+LGBM Regr.,Logistic,Logistic,0.9,0.915,1.0557640391096161,0.2504657709294685,1000
+LGBM Regr.,Logistic,Logistic,0.95,0.966,1.2580204456626907,0.2504657709294685,1000
+LassoCV,LGBM Clas.,LGBM Clas.,0.9,0.92,1.0534935963070167,0.2478314316215499,1000
+LassoCV,LGBM Clas.,LGBM Clas.,0.95,0.964,1.2553150461978761,0.2478314316215499,1000
+LassoCV,LGBM Clas.,Logistic,0.9,0.915,1.048839165759454,0.2487976919643911,1000
+LassoCV,LGBM Clas.,Logistic,0.95,0.964,1.2497689501244686,0.2487976919643911,1000
+LassoCV,Logistic,LGBM Clas.,0.9,0.918,1.0011806143989663,0.24364342853702406,1000
+LassoCV,Logistic,LGBM Clas.,0.95,0.959,1.1929802835274108,0.24364342853702406,1000
+LassoCV,Logistic,Logistic,0.9,0.916,0.9983343843722963,0.24211448303931346,1000
+LassoCV,Logistic,Logistic,0.95,0.967,1.1895887912678056,0.24211448303931346,1000
diff --git a/results/irm/iivm_late_coverage_metadata.csv b/results/irm/iivm_late_coverage_metadata.csv
deleted file mode 100644
index e737bef..0000000
--- a/results/irm/iivm_late_coverage_metadata.csv
+++ /dev/null
@@ -1,2 +0,0 @@
-DoubleML Version,Script,Date,Total Runtime (seconds),Python Version
-0.10.dev0,iivm_late_coverage.py,2025-05-22 13:10:22,5866.210592031479,3.12.10
diff --git a/results/irm/iivm_late_metadata.csv b/results/irm/iivm_late_metadata.csv
new file mode 100644
index 0000000..0ab74ee
--- /dev/null
+++ b/results/irm/iivm_late_metadata.csv
@@ -0,0 +1,2 @@
+DoubleML Version,Script,Date,Total Runtime (minutes),Python Version,Config File
+0.10.0,IIVMLATECoverageSimulation,2025-06-05 14:57,2.219698127110799,3.12.9,scripts/irm/iivm_late_config.yml
diff --git a/results/irm/irm_apo_coverage_apo.csv b/results/irm/irm_apo_coverage_apo.csv
deleted file mode 100644
index e830e1b..0000000
--- a/results/irm/irm_apo_coverage_apo.csv
+++ /dev/null
@@ -1,25 +0,0 @@
-Learner g,Learner m,Treatment Level,level,Coverage,CI Length,Bias,repetition
-LGBM,LGBM,0.0,0.9,0.912,8.657690136121921,2.076508232578001,1000
-LGBM,LGBM,0.0,0.95,0.966,10.316274091547035,2.076508232578001,1000
-LGBM,LGBM,1.0,0.9,0.914,38.23339285821166,9.211992345283987,1000
-LGBM,LGBM,1.0,0.95,0.967,45.55789754237906,9.211992345283987,1000
-LGBM,LGBM,2.0,0.9,0.891,37.49194764946096,9.582632590363396,1000
-LGBM,LGBM,2.0,0.95,0.952,44.67441108385784,9.582632590363396,1000
-LGBM,Logistic,0.0,0.9,0.904,5.625897101886533,1.3388185183680807,1000
-LGBM,Logistic,0.0,0.95,0.954,6.7036698705295725,1.3388185183680807,1000
-LGBM,Logistic,1.0,0.9,0.923,7.423300143143785,1.6937126309587676,1000
-LGBM,Logistic,1.0,0.95,0.968,8.84540769378873,1.6937126309587676,1000
-LGBM,Logistic,2.0,0.9,0.92,7.321275660150268,1.66124252169416,1000
-LGBM,Logistic,2.0,0.95,0.969,8.723838024042964,1.66124252169416,1000
-Linear,LGBM,0.0,0.9,0.901,5.498257423071024,1.309688195531907,1000
-Linear,LGBM,0.0,0.95,0.95,6.551577812380716,1.309688195531907,1000
-Linear,LGBM,1.0,0.9,0.949,10.700720020780512,2.128644427723186,1000
-Linear,LGBM,1.0,0.95,0.983,12.75069435099058,2.128644427723186,1000
-Linear,LGBM,2.0,0.9,0.933,7.513644049429104,1.6358873525441715,1000
-Linear,LGBM,2.0,0.95,0.968,8.953059097926168,1.6358873525441715,1000
-Linear,Logistic,0.0,0.9,0.902,5.335670092717667,1.2884949290748537,1000
-Linear,Logistic,0.0,0.95,0.953,6.357843058957276,1.2884949290748537,1000
-Linear,Logistic,1.0,0.9,0.908,5.417512107920403,1.280308177753247,1000
-Linear,Logistic,1.0,0.95,0.956,6.455363835025866,1.280308177753247,1000
-Linear,Logistic,2.0,0.9,0.906,5.366403391397197,1.28401173581695,1000
-Linear,Logistic,2.0,0.95,0.957,6.3944640430685675,1.28401173581695,1000
diff --git a/results/irm/irm_apo_coverage_apos.csv b/results/irm/irm_apo_coverage_apos.csv
deleted file mode 100644
index 3531ab7..0000000
--- a/results/irm/irm_apo_coverage_apos.csv
+++ /dev/null
@@ -1,9 +0,0 @@
-Learner g,Learner m,level,Coverage,CI Length,Bias,Uniform Coverage,Uniform CI Length,repetition
-LGBM,LGBM,0.9,0.9066666666666666,28.21021843292038,7.119697418816689,0.925,36.1943517986729,1000
-LGBM,LGBM,0.95,0.958,33.61454856442563,7.119697418816689,0.973,40.868020474452145,1000
-LGBM,Logistic,0.9,0.9126666666666666,6.789316986971467,1.5725415986639164,0.922,8.394070597867524,1000
-LGBM,Logistic,0.95,0.9626666666666667,8.089970168806186,1.5725415986639164,0.96,9.592284619044477,1000
-Linear,LGBM,0.9,0.927,7.903418354805325,1.7282923156600922,0.937,9.850445774370156,1000
-Linear,LGBM,0.95,0.9676666666666667,9.41750382912841,1.7282923156600922,0.974,11.234392064837424,1000
-Linear,Logistic,0.9,0.9043333333333333,5.372532806955153,1.2820240087685983,0.901,5.7964894175017925,1000
-Linear,Logistic,0.95,0.9566666666666667,6.4017676921854445,1.2820240087685983,0.952,6.8182385960659,1000
diff --git a/results/irm/irm_apo_coverage_apos_contrast.csv b/results/irm/irm_apo_coverage_apos_contrast.csv
deleted file mode 100644
index 2c98fb5..0000000
--- a/results/irm/irm_apo_coverage_apos_contrast.csv
+++ /dev/null
@@ -1,9 +0,0 @@
-Learner g,Learner m,level,Coverage,CI Length,Bias,Uniform Coverage,Uniform CI Length,repetition
-LGBM,LGBM,0.9,0.8885,37.87536523730769,9.788862143523833,0.898,44.828925781533165,1000
-LGBM,LGBM,0.95,0.9485,45.13128131893863,9.788862143523833,0.965,51.42387403222799,1000
-LGBM,Logistic,0.9,0.9275,5.725128733165455,1.2673279809901974,0.927,6.774304315459575,1000
-LGBM,Logistic,0.95,0.9635,6.821911652197591,1.2673279809901974,0.963,7.7654094416927215,1000
-Linear,LGBM,0.9,0.958,7.430953406859927,1.5064252052940996,0.975,8.798628447594016,1000
-Linear,LGBM,0.95,0.989,8.85452711998085,1.5064252052940996,0.992,10.090116294778499,1000
-Linear,Logistic,0.9,0.8735,1.1425883251377777,0.29465524587422715,0.87,1.3505222299078565,1000
-Linear,Logistic,0.95,0.9355,1.3614779635902856,0.29465524587422715,0.92,1.5496710496635275,1000
diff --git a/results/irm/irm_apo_coverage_metadata.csv b/results/irm/irm_apo_coverage_metadata.csv
deleted file mode 100644
index 1e26249..0000000
--- a/results/irm/irm_apo_coverage_metadata.csv
+++ /dev/null
@@ -1,2 +0,0 @@
-DoubleML Version,Script,Date,Total Runtime (seconds),Python Version
-0.10.dev0,irm_apo_coverage.py,2025-05-22 13:10:29,5871.5619258880615,3.12.10
diff --git a/results/irm/irm_ate_config.yml b/results/irm/irm_ate_config.yml
new file mode 100644
index 0000000..d19a50a
--- /dev/null
+++ b/results/irm/irm_ate_config.yml
@@ -0,0 +1,61 @@
+simulation_parameters:
+ repetitions: 1000
+ max_runtime: 19800
+ random_seed: 42
+ n_jobs: -2
+dgp_parameters:
+ theta:
+ - 0.5
+ n_obs:
+ - 500
+ dim_x:
+ - 20
+learner_definitions:
+ lasso: &id001
+ name: LassoCV
+ logit: &id002
+ name: Logistic
+ rfr: &id003
+ name: RF Regr.
+ params:
+ n_estimators: 200
+ max_features: 20
+ max_depth: 5
+ min_samples_leaf: 2
+ rfc: &id004
+ name: RF Clas.
+ params:
+ n_estimators: 200
+ max_features: 20
+ max_depth: 5
+ min_samples_leaf: 2
+ lgbmr: &id005
+ name: LGBM Regr.
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+ lgbmc: &id006
+ name: LGBM Clas.
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+dml_parameters:
+ learners:
+ - ml_g: *id001
+ ml_m: *id002
+ - ml_g: *id003
+ ml_m: *id004
+ - ml_g: *id001
+ ml_m: *id004
+ - ml_g: *id003
+ ml_m: *id002
+ - ml_g: *id005
+ ml_m: *id006
+ - ml_g: *id005
+ ml_m: *id002
+ - ml_g: *id001
+ ml_m: *id006
+confidence_parameters:
+ level:
+ - 0.95
+ - 0.9
diff --git a/results/irm/irm_ate_coverage.csv b/results/irm/irm_ate_coverage.csv
index 9ed9b19..46ebf4c 100644
--- a/results/irm/irm_ate_coverage.csv
+++ b/results/irm/irm_ate_coverage.csv
@@ -1,9 +1,15 @@
Learner g,Learner m,level,Coverage,CI Length,Bias,repetition
-Lasso,Logistic Regression,0.9,0.875,0.467771360497192,0.12336884150536215,1000
-Lasso,Logistic Regression,0.95,0.935,0.5573839547492131,0.12336884150536215,1000
-Lasso,Random Forest,0.9,0.904,0.5986207495475606,0.14643826768714088,1000
-Lasso,Random Forest,0.95,0.954,0.7133006185396007,0.14643826768714088,1000
-Random Forest,Logistic Regression,0.9,0.802,0.515856329530175,0.1497402299287362,1000
-Random Forest,Logistic Regression,0.95,0.885,0.6146807293424889,0.1497402299287362,1000
-Random Forest,Random Forest,0.9,0.898,0.625184528129902,0.1492705643144829,1000
-Random Forest,Random Forest,0.95,0.948,0.744953313017455,0.1492705643144829,1000
+LGBM Regr.,LGBM Clas.,0.9,0.928,1.1983115160037485,0.2840471834602478,1000
+LGBM Regr.,LGBM Clas.,0.95,0.98,1.4278762408664047,0.2840471834602478,1000
+LGBM Regr.,Logistic,0.9,0.928,0.771069826261061,0.1773323727171827,1000
+LGBM Regr.,Logistic,0.95,0.97,0.9187863675372636,0.1773323727171827,1000
+LassoCV,LGBM Clas.,0.9,0.943,1.0988039710069317,0.25576093311987325,1000
+LassoCV,LGBM Clas.,0.95,0.979,1.3093056877253173,0.25576093311987325,1000
+LassoCV,Logistic,0.9,0.927,0.6575776853999991,0.1495642781049785,1000
+LassoCV,Logistic,0.95,0.968,0.7835521406302206,0.1495642781049785,1000
+LassoCV,RF Clas.,0.9,0.926,0.5837441390355065,0.13723792736069168,1000
+LassoCV,RF Clas.,0.95,0.962,0.6955740437624297,0.13723792736069168,1000
+RF Regr.,Logistic,0.9,0.918,0.743232966143666,0.1705153153049291,1000
+RF Regr.,Logistic,0.95,0.968,0.8856167028456445,0.1705153153049291,1000
+RF Regr.,RF Clas.,0.9,0.905,0.6164614614548363,0.14356423385388378,1000
+RF Regr.,RF Clas.,0.95,0.951,0.7345591379749272,0.14356423385388378,1000
diff --git a/results/irm/irm_ate_coverage_metadata.csv b/results/irm/irm_ate_coverage_metadata.csv
deleted file mode 100644
index 5ed7ac7..0000000
--- a/results/irm/irm_ate_coverage_metadata.csv
+++ /dev/null
@@ -1,2 +0,0 @@
-DoubleML Version,Script,Date,Total Runtime (seconds),Python Version
-0.10.dev0,irm_ate_coverage.py,2025-05-22 12:32:44,3604.067242860794,3.12.10
diff --git a/results/irm/irm_ate_metadata.csv b/results/irm/irm_ate_metadata.csv
new file mode 100644
index 0000000..03c5799
--- /dev/null
+++ b/results/irm/irm_ate_metadata.csv
@@ -0,0 +1,2 @@
+DoubleML Version,Script,Date,Total Runtime (minutes),Python Version,Config File
+0.11.dev0,IRMATECoverageSimulation,2025-06-05 14:42,125.66061746279398,3.12.3,scripts/irm/irm_ate_config.yml
diff --git a/results/irm/irm_ate_sensitivity.csv b/results/irm/irm_ate_sensitivity.csv
deleted file mode 100644
index 6e10770..0000000
--- a/results/irm/irm_ate_sensitivity.csv
+++ /dev/null
@@ -1,9 +0,0 @@
-Learner g,Learner m,level,Coverage,CI Length,Bias,Coverage (Lower),Coverage (Upper),RV,RVa,Bias (Lower),Bias (Upper),repetition
-LGBM,LGBM,0.9,0.112,0.266748233354866,0.17891290135375168,0.962,1.0,0.12379347892727971,0.05409589192160397,0.04254708028278409,0.32210978560617337,500
-LGBM,LGBM,0.95,0.318,0.31785012462427936,0.17891290135375168,0.998,1.0,0.12379347892727971,0.03441021667548556,0.04254708028278409,0.32210978560617337,500
-LGBM,Logistic Regr.,0.9,0.292,0.2577778025822409,0.14922926552528684,1.0,1.0,0.10066571951295798,0.03493291437943745,0.029012990398602386,0.2979424530565633,500
-LGBM,Logistic Regr.,0.95,0.548,0.30716119707955875,0.14922926552528684,1.0,1.0,0.10066571951295798,0.01869752301454861,0.029012990398602386,0.2979424530565633,500
-Linear Reg.,LGBM,0.9,0.122,0.2675665174758639,0.17873104426193565,0.964,1.0,0.12647219547900976,0.05512739569620471,0.04513946154555041,0.31857328180879246,500
-Linear Reg.,LGBM,0.95,0.314,0.31882517029399604,0.17873104426193565,0.998,1.0,0.12647219547900976,0.035017588858111126,0.04513946154555041,0.31857328180879246,500
-Linear Reg.,Logistic Regr.,0.9,0.86,0.2592281409673473,0.08970251629543106,1.0,1.0,0.06300567732617765,0.006719868195974334,0.05720312141493262,0.23496869651774063,500
-Linear Reg.,Logistic Regr.,0.95,0.974,0.30888938185760084,0.08970251629543106,1.0,1.0,0.06300567732617765,0.0014945204694376396,0.05720312141493262,0.23496869651774063,500
diff --git a/results/irm/irm_ate_sensitivity_config.yml b/results/irm/irm_ate_sensitivity_config.yml
new file mode 100644
index 0000000..74143aa
--- /dev/null
+++ b/results/irm/irm_ate_sensitivity_config.yml
@@ -0,0 +1,53 @@
+simulation_parameters:
+ repetitions: 500
+ max_runtime: 19800
+ random_seed: 42
+ n_jobs: -2
+dgp_parameters:
+ theta:
+ - 5.0
+ n_obs:
+ - 5000
+ trimming_threshold:
+ - 0.05
+ var_epsilon_y:
+ - 1.0
+ linear:
+ - false
+ gamma_a:
+ - 0.198
+ beta_a:
+ - 0.582
+learner_definitions:
+ linear: &id001
+ name: Linear
+ logit: &id002
+ name: Logistic
+ lgbmr: &id003
+ name: LGBM Regr.
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+ min_child_samples: 10
+ lgbmc: &id004
+ name: LGBM Clas.
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+ min_child_samples: 10
+dml_parameters:
+ learners:
+ - ml_g: *id001
+ ml_m: *id002
+ - ml_g: *id003
+ ml_m: *id004
+ - ml_g: *id003
+ ml_m: *id002
+ - ml_g: *id001
+ ml_m: *id004
+ trimming_threshold:
+ - 0.05
+confidence_parameters:
+ level:
+ - 0.95
+ - 0.9
diff --git a/results/irm/irm_ate_sensitivity_coverage.csv b/results/irm/irm_ate_sensitivity_coverage.csv
new file mode 100644
index 0000000..f538604
--- /dev/null
+++ b/results/irm/irm_ate_sensitivity_coverage.csv
@@ -0,0 +1,9 @@
+Learner g,Learner m,level,Coverage,CI Length,Bias,Coverage (Lower),Coverage (Upper),RV,RVa,Bias (Lower),Bias (Upper),repetition
+LGBM Regr.,LGBM Clas.,0.9,0.084,0.2668540258210539,0.18198204659615963,0.95,1.0,0.12589776145327045,0.05628191414414868,0.044383096119999466,0.32513075000880287,500
+LGBM Regr.,LGBM Clas.,0.95,0.246,0.317976184122928,0.18198204659615963,0.998,1.0,0.12589776145327045,0.03630227835056437,0.044383096119999466,0.32513075000880287,500
+LGBM Regr.,Logistic,0.9,0.26,0.2574630882167088,0.14916839267522197,1.0,1.0,0.10064526477515615,0.034829417508842296,0.026887536242060982,0.297933622145966,500
+LGBM Regr.,Logistic,0.95,0.572,0.3067861917831888,0.14916839267522197,1.0,1.0,0.10064526477515615,0.018255506137347777,0.026887536242060982,0.297933622145966,500
+Linear,LGBM Clas.,0.9,0.082,0.2672263041294733,0.1800922704825838,0.964,1.0,0.12741937328027908,0.056153203215479244,0.04433563312353054,0.31995377591144475,500
+Linear,LGBM Clas.,0.95,0.258,0.31841978108789315,0.1800922704825838,0.996,1.0,0.12741937328027908,0.03559871249886926,0.04433563312353054,0.31995377591144475,500
+Linear,Logistic,0.9,0.868,0.2588792120747325,0.08970647188763244,1.0,1.0,0.06307809186280441,0.006372043222062277,0.0574078098172687,0.23496351259089188,500
+Linear,Logistic,0.95,0.976,0.3084736074376251,0.08970647188763244,1.0,1.0,0.06307809186280441,0.001546577328924639,0.0574078098172687,0.23496351259089188,500
diff --git a/results/irm/irm_ate_sensitivity_metadata.csv b/results/irm/irm_ate_sensitivity_metadata.csv
index 327aa8c..e47f137 100644
--- a/results/irm/irm_ate_sensitivity_metadata.csv
+++ b/results/irm/irm_ate_sensitivity_metadata.csv
@@ -1,2 +1,2 @@
-DoubleML Version,Script,Date,Total Runtime (seconds),Python Version
-0.10.dev0,irm_ate_sensitivity.py,2025-05-22 14:48:09,6858.955473899841,3.12.10
+DoubleML Version,Script,Date,Total Runtime (minutes),Python Version,Config File
+0.11.dev0,IRMATESensitivityCoverageSimulation,2025-06-05 13:14,37.417966898282366,3.12.3,scripts/irm/irm_ate_sensitivity_config.yml
diff --git a/results/irm/irm_atte_config.yml b/results/irm/irm_atte_config.yml
new file mode 100644
index 0000000..2d3c69a
--- /dev/null
+++ b/results/irm/irm_atte_config.yml
@@ -0,0 +1,61 @@
+simulation_parameters:
+ repetitions: 1000
+ max_runtime: 19800
+ random_seed: 42
+ n_jobs: -2
+dgp_parameters:
+ theta:
+ - 0.5
+ n_obs:
+ - 500
+ dim_x:
+ - 20
+learner_definitions:
+ lasso: &id001
+ name: LassoCV
+ logit: &id002
+ name: Logistic
+ rfr: &id003
+ name: RF Regr.
+ params:
+ n_estimators: 200
+ max_features: 20
+ max_depth: 20
+ min_samples_leaf: 2
+ rfc: &id004
+ name: RF Clas.
+ params:
+ n_estimators: 200
+ max_features: 20
+ max_depth: 20
+ min_samples_leaf: 20
+ lgbmr: &id005
+ name: LGBM Regr.
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+ lgbmc: &id006
+ name: LGBM Clas.
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+dml_parameters:
+ learners:
+ - ml_g: *id001
+ ml_m: *id002
+ - ml_g: *id003
+ ml_m: *id004
+ - ml_g: *id001
+ ml_m: *id004
+ - ml_g: *id003
+ ml_m: *id002
+ - ml_g: *id005
+ ml_m: *id006
+ - ml_g: *id005
+ ml_m: *id002
+ - ml_g: *id001
+ ml_m: *id006
+confidence_parameters:
+ level:
+ - 0.95
+ - 0.9
diff --git a/results/irm/irm_atte_coverage.csv b/results/irm/irm_atte_coverage.csv
index 5255488..5b68231 100644
--- a/results/irm/irm_atte_coverage.csv
+++ b/results/irm/irm_atte_coverage.csv
@@ -1,9 +1,15 @@
Learner g,Learner m,level,Coverage,CI Length,Bias,repetition
-Lasso,Logistic Regression,0.9,0.891,0.5331759172799808,0.1350391266439693,1000
-Lasso,Logistic Regression,0.95,0.937,0.6353182910443252,0.1350391266439693,1000
-Lasso,Random Forest,0.9,0.897,0.7382774835343612,0.18122786303688493,1000
-Lasso,Random Forest,0.95,0.948,0.8797118811149396,0.18122786303688493,1000
-Random Forest,Logistic Regression,0.9,0.872,0.5528818976331068,0.15060693615944673,1000
-Random Forest,Logistic Regression,0.95,0.918,0.6587994149202312,0.15060693615944673,1000
-Random Forest,Random Forest,0.9,0.899,0.7455843448660364,0.18245405782788798,1000
-Random Forest,Random Forest,0.95,0.948,0.8884185434072277,0.18245405782788798,1000
+LGBM Regr.,LGBM Clas.,0.9,0.927,1.5064451215730035,0.34563899658615477,1000
+LGBM Regr.,LGBM Clas.,0.95,0.974,1.7950400780897324,0.34563899658615477,1000
+LGBM Regr.,Logistic,0.9,0.926,0.853133738564191,0.2115612747662681,1000
+LGBM Regr.,Logistic,0.95,0.969,1.016571550309234,0.2115612747662681,1000
+LassoCV,LGBM Clas.,0.9,0.912,1.3899632828213013,0.3405205709305417,1000
+LassoCV,LGBM Clas.,0.95,0.977,1.6562434064190357,0.3405205709305417,1000
+LassoCV,Logistic,0.9,0.918,0.7956786618509171,0.19501674862485438,1000
+LassoCV,Logistic,0.95,0.962,0.9481096037616187,0.19501674862485438,1000
+LassoCV,RF Clas.,0.9,0.895,0.5793446092118805,0.1467183519931486,1000
+LassoCV,RF Clas.,0.95,0.945,0.6903316806354453,0.1467183519931486,1000
+RF Regr.,Logistic,0.9,0.915,0.8295563252373992,0.200468421193765,1000
+RF Regr.,Logistic,0.95,0.963,0.9884773295153919,0.200468421193765,1000
+RF Regr.,RF Clas.,0.9,0.881,0.5967830827952515,0.15670311644434254,1000
+RF Regr.,RF Clas.,0.95,0.939,0.7111109035454538,0.15670311644434254,1000
diff --git a/results/irm/irm_atte_coverage_metadata.csv b/results/irm/irm_atte_coverage_metadata.csv
deleted file mode 100644
index 99c4def..0000000
--- a/results/irm/irm_atte_coverage_metadata.csv
+++ /dev/null
@@ -1,2 +0,0 @@
-DoubleML Version,Script,Date,Total Runtime (seconds),Python Version
-0.10.dev0,irm_atte_coverage.py,2025-05-22 13:53:23,3572.697674512863,3.12.10
diff --git a/results/irm/irm_atte_metadata.csv b/results/irm/irm_atte_metadata.csv
new file mode 100644
index 0000000..876cac0
--- /dev/null
+++ b/results/irm/irm_atte_metadata.csv
@@ -0,0 +1,2 @@
+DoubleML Version,Script,Date,Total Runtime (minutes),Python Version,Config File
+0.11.dev0,IRMATTECoverageSimulation,2025-06-05 14:42,126.08159985939662,3.12.3,scripts/irm/irm_atte_config.yml
diff --git a/results/irm/irm_atte_sensitivity.csv b/results/irm/irm_atte_sensitivity.csv
deleted file mode 100644
index 7737483..0000000
--- a/results/irm/irm_atte_sensitivity.csv
+++ /dev/null
@@ -1,9 +0,0 @@
-Learner g,Learner m,level,Coverage,CI Length,Bias,Coverage (Lower),Coverage (Upper),RV,RVa,Bias (Lower),Bias (Upper),repetition
-LGBM,LGBM,0.9,0.702,0.348892741328716,0.1353547312940485,0.95,1.0,0.10509293446782589,0.024288450266824572,0.06499582700735684,0.25876253126104387,500
-LGBM,LGBM,0.95,0.826,0.41573134306126747,0.1353547312940485,0.982,1.0,0.10509293446782589,0.012452066983782116,0.06499582700735684,0.25876253126104387,500
-LGBM,Logistic Regr.,0.9,0.714,0.34666910502599596,0.13078975736827733,0.964,1.0,0.0981701852821437,0.022246998237972524,0.06545442330246612,0.2589976808508975,500
-LGBM,Logistic Regr.,0.95,0.834,0.41308171698108886,0.13078975736827733,0.984,1.0,0.0981701852821437,0.010949342084431735,0.06545442330246612,0.2589976808508975,500
-Linear Reg.,LGBM,0.9,0.754,0.3496967006881292,0.12455057551341779,0.962,1.0,0.09867724125956995,0.0202175935504151,0.06504946816195568,0.2439341901457105,500
-Linear Reg.,LGBM,0.95,0.858,0.4166893197247619,0.12455057551341779,0.986,1.0,0.09867724125956995,0.009856683129418066,0.06504946816195568,0.2439341901457105,500
-Linear Reg.,Logistic Regr.,0.9,0.948,0.3502540540945954,0.07444772768321123,0.996,1.0,0.05840145836627322,0.004181143741279689,0.09544484272838333,0.17545346180009289,500
-Linear Reg.,Logistic Regr.,0.95,0.976,0.41735344727108903,0.07444772768321123,0.998,1.0,0.05840145836627322,0.0015739249195781788,0.09544484272838333,0.17545346180009289,500
diff --git a/results/irm/irm_atte_sensitivity_config.yml b/results/irm/irm_atte_sensitivity_config.yml
new file mode 100644
index 0000000..bf06bc6
--- /dev/null
+++ b/results/irm/irm_atte_sensitivity_config.yml
@@ -0,0 +1,53 @@
+simulation_parameters:
+ repetitions: 500
+ max_runtime: 19800
+ random_seed: 42
+ n_jobs: -2
+dgp_parameters:
+ theta:
+ - 5.0
+ n_obs:
+ - 5000
+ trimming_threshold:
+ - 0.05
+ var_epsilon_y:
+ - 1.0
+ linear:
+ - false
+ gamma_a:
+ - 0.151
+ beta_a:
+ - 0.582
+learner_definitions:
+ linear: &id001
+ name: Linear
+ logit: &id002
+ name: Logistic
+ lgbmr: &id003
+ name: LGBM Regr.
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+ min_child_samples: 10
+ lgbmc: &id004
+ name: LGBM Clas.
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+ min_child_samples: 10
+dml_parameters:
+ learners:
+ - ml_g: *id001
+ ml_m: *id002
+ - ml_g: *id003
+ ml_m: *id004
+ - ml_g: *id003
+ ml_m: *id002
+ - ml_g: *id001
+ ml_m: *id004
+ trimming_threshold:
+ - 0.05
+confidence_parameters:
+ level:
+ - 0.95
+ - 0.9
diff --git a/results/irm/irm_atte_sensitivity_coverage.csv b/results/irm/irm_atte_sensitivity_coverage.csv
new file mode 100644
index 0000000..d592177
--- /dev/null
+++ b/results/irm/irm_atte_sensitivity_coverage.csv
@@ -0,0 +1,9 @@
+Learner g,Learner m,level,Coverage,CI Length,Bias,Coverage (Lower),Coverage (Upper),RV,RVa,Bias (Lower),Bias (Upper),repetition
+LGBM Regr.,LGBM Clas.,0.9,0.708,0.3489511146392163,0.13756848436233937,0.94,1.0,0.10664020023640788,0.02469821575907808,0.06297064757762019,0.26272747008699665,500
+LGBM Regr.,LGBM Clas.,0.95,0.826,0.41580089915085766,0.13756848436233937,0.978,1.0,0.10664020023640788,0.012557304495326774,0.06297064757762019,0.26272747008699665,500
+LGBM Regr.,Logistic,0.9,0.728,0.3466996212937586,0.13203748795264333,0.96,1.0,0.0989531978785329,0.02152136562411849,0.061845450573817774,0.261622864513939,500
+LGBM Regr.,Logistic,0.95,0.834,0.41311807935691197,0.13203748795264333,0.988,1.0,0.0989531978785329,0.01068777225560622,0.061845450573817774,0.261622864513939,500
+Linear,LGBM Clas.,0.9,0.77,0.3499025013245624,0.12450248762578023,0.968,1.0,0.09911082424887455,0.019375250746672557,0.061685830635415634,0.24526004965885637,500
+Linear,LGBM Clas.,0.95,0.866,0.41693454630832866,0.12450248762578023,0.988,1.0,0.09911082424887455,0.009539652027944212,0.061685830635415634,0.24526004965885637,500
+Linear,Logistic,0.9,0.936,0.3503606994029222,0.07357271764564946,0.998,1.0,0.05747974737962404,0.004779092271966848,0.0939884966306456,0.17946572893838733,500
+Linear,Logistic,0.95,0.976,0.41748052299382554,0.07357271764564946,1.0,1.0,0.05747974737962404,0.0017099292842988314,0.0939884966306456,0.17946572893838733,500
diff --git a/results/irm/irm_atte_sensitivity_metadata.csv b/results/irm/irm_atte_sensitivity_metadata.csv
index 2fc4ba5..06469e7 100644
--- a/results/irm/irm_atte_sensitivity_metadata.csv
+++ b/results/irm/irm_atte_sensitivity_metadata.csv
@@ -1,2 +1,2 @@
-DoubleML Version,Script,Date,Total Runtime (seconds),Python Version
-0.10.dev0,irm_atte_sensitivity.py,2025-05-22 14:47:30,6819.7349536418915,3.12.10
+DoubleML Version,Script,Date,Total Runtime (minutes),Python Version,Config File
+0.11.dev0,IRMATTESensitivityCoverageSimulation,2025-06-05 13:14,37.61970745722453,3.12.3,scripts/irm/irm_atte_sensitivity_config.yml
diff --git a/results/irm/irm_cate_config.yml b/results/irm/irm_cate_config.yml
new file mode 100644
index 0000000..c1206fe
--- /dev/null
+++ b/results/irm/irm_cate_config.yml
@@ -0,0 +1,63 @@
+simulation_parameters:
+ repetitions: 1000
+ max_runtime: 19800
+ random_seed: 42
+ n_jobs: -2
+dgp_parameters:
+ n_obs:
+ - 500
+ p:
+ - 10
+ support_size:
+ - 5
+ n_x:
+ - 1
+learner_definitions:
+ linear: &id001
+ name: Linear
+ logit: &id002
+ name: Logistic
+ rfr: &id003
+ name: RF Regr.
+ params:
+ n_estimators: 200
+ max_features: 20
+ max_depth: 5
+ min_samples_leaf: 2
+ rfc: &id004
+ name: RF Clas.
+ params:
+ n_estimators: 200
+ max_features: 20
+ max_depth: 5
+ min_samples_leaf: 2
+ lgbmr: &id005
+ name: LGBM Regr.
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+ lgbmc: &id006
+ name: LGBM Clas.
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+dml_parameters:
+ learners:
+ - ml_g: *id001
+ ml_m: *id002
+ - ml_g: *id003
+ ml_m: *id004
+ - ml_g: *id001
+ ml_m: *id004
+ - ml_g: *id003
+ ml_m: *id002
+ - ml_g: *id005
+ ml_m: *id006
+ - ml_g: *id005
+ ml_m: *id002
+ - ml_g: *id001
+ ml_m: *id006
+confidence_parameters:
+ level:
+ - 0.95
+ - 0.9
diff --git a/results/irm/irm_cate_coverage.csv b/results/irm/irm_cate_coverage.csv
index 788025e..14cd160 100644
--- a/results/irm/irm_cate_coverage.csv
+++ b/results/irm/irm_cate_coverage.csv
@@ -1,9 +1,15 @@
Learner g,Learner m,level,Coverage,CI Length,Bias,Uniform Coverage,Uniform CI Length,repetition
-LGBM,LGBM,0.9,0.9356475000000001,0.6670555194228769,0.1482947174222056,1.0,1.689294724011131,1000
-LGBM,LGBM,0.95,0.9723379999999999,0.7948456764390678,0.1482947174222056,1.0,1.6897571332817831,1000
-LGBM,Logistic Regression,0.9,0.8889914999999999,0.2354452920928757,0.05850699314525499,0.996,0.5957379206984681,1000
-LGBM,Logistic Regression,0.95,0.942037,0.28055036951027373,0.05850699314525499,0.996,0.5990789624833718,1000
-Lasso,LGBM,0.9,0.896289,0.6428742805642967,0.15829333167540963,1.0,1.6320898187101593,1000
-Lasso,LGBM,0.95,0.9491539999999999,0.7660319531461224,0.15829333167540963,1.0,1.6337799284392311,1000
-Lasso,Logistic Regression,0.9,0.8892920000000001,0.24738399667726244,0.061668240816652016,0.998,0.629839636678611,1000
-Lasso,Logistic Regression,0.95,0.9413365,0.29477621345410787,0.061668240816652016,0.997,0.6288417962868703,1000
+LGBM Regr.,LGBM Clas.,0.9,0.92506,1.0375110625997357,0.2365318243837263,1.0,2.596617666316375,1000
+LGBM Regr.,LGBM Clas.,0.95,0.96602,1.2362706826540963,0.2365318243837263,1.0,2.6133853117978934,1000
+LGBM Regr.,Logistic,0.9,0.89561,0.45975570518256736,0.11084262270269904,1.0,1.1581396191978983,1000
+LGBM Regr.,Logistic,0.95,0.94487,0.547832712333638,0.11084262270269904,1.0,1.1623001863971778,1000
+Linear,LGBM Clas.,0.9,0.90998,1.0435330991760245,0.2472414883409576,0.998,2.618492131430461,1000
+Linear,LGBM Clas.,0.95,0.95692,1.243446381822529,0.2472414883409576,0.999,2.626489143651275,1000
+Linear,Logistic,0.9,0.89899,0.47571933545085376,0.11363003649173503,1.0,1.1954349623588154,1000
+Linear,Logistic,0.95,0.9459299999999999,0.5668545510405528,0.11363003649173503,0.998,1.1979576781693033,1000
+Linear,RF Clas.,0.9,0.90489,0.5110714313286231,0.12032009678319744,1.0,1.2817085692658767,1000
+Linear,RF Clas.,0.95,0.9514600000000001,0.6089791714706715,0.12032009678319744,1.0,1.2861291474394618,1000
+RF Regr.,Logistic,0.9,0.89376,0.4592745091137625,0.11114309499883832,0.999,1.1543665958014406,1000
+RF Regr.,Logistic,0.95,0.94267,0.5472593318522952,0.11114309499883832,1.0,1.1532136852815742,1000
+RF Regr.,RF Clas.,0.9,0.89648,0.4916798706519477,0.11789806419426764,1.0,1.2340869862770245,1000
+RF Regr.,RF Clas.,0.95,0.9448,0.5858727017474369,0.11789806419426764,1.0,1.2368879574968341,1000
diff --git a/results/irm/irm_cate_coverage_metadata.csv b/results/irm/irm_cate_coverage_metadata.csv
deleted file mode 100644
index 771f45f..0000000
--- a/results/irm/irm_cate_coverage_metadata.csv
+++ /dev/null
@@ -1,2 +0,0 @@
-DoubleML Version,Script,Date,Total Runtime (seconds),Python Version
-0.10.dev0,irm_cate_coverage.py,2025-05-22 14:27:33,5618.293743133545,3.12.10
diff --git a/results/irm/irm_cate_metadata.csv b/results/irm/irm_cate_metadata.csv
new file mode 100644
index 0000000..4bd0baf
--- /dev/null
+++ b/results/irm/irm_cate_metadata.csv
@@ -0,0 +1,2 @@
+DoubleML Version,Script,Date,Total Runtime (minutes),Python Version,Config File
+0.11.dev0,IRMCATECoverageSimulation,2025-06-05 13:56,79.61121084690095,3.12.3,scripts/irm/irm_cate_config.yml
diff --git a/results/irm/irm_gate_config.yml b/results/irm/irm_gate_config.yml
new file mode 100644
index 0000000..c1206fe
--- /dev/null
+++ b/results/irm/irm_gate_config.yml
@@ -0,0 +1,63 @@
+simulation_parameters:
+ repetitions: 1000
+ max_runtime: 19800
+ random_seed: 42
+ n_jobs: -2
+dgp_parameters:
+ n_obs:
+ - 500
+ p:
+ - 10
+ support_size:
+ - 5
+ n_x:
+ - 1
+learner_definitions:
+ linear: &id001
+ name: Linear
+ logit: &id002
+ name: Logistic
+ rfr: &id003
+ name: RF Regr.
+ params:
+ n_estimators: 200
+ max_features: 20
+ max_depth: 5
+ min_samples_leaf: 2
+ rfc: &id004
+ name: RF Clas.
+ params:
+ n_estimators: 200
+ max_features: 20
+ max_depth: 5
+ min_samples_leaf: 2
+ lgbmr: &id005
+ name: LGBM Regr.
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+ lgbmc: &id006
+ name: LGBM Clas.
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+dml_parameters:
+ learners:
+ - ml_g: *id001
+ ml_m: *id002
+ - ml_g: *id003
+ ml_m: *id004
+ - ml_g: *id001
+ ml_m: *id004
+ - ml_g: *id003
+ ml_m: *id002
+ - ml_g: *id005
+ ml_m: *id006
+ - ml_g: *id005
+ ml_m: *id002
+ - ml_g: *id001
+ ml_m: *id006
+confidence_parameters:
+ level:
+ - 0.95
+ - 0.9
diff --git a/results/irm/irm_gate_coverage.csv b/results/irm/irm_gate_coverage.csv
index 4208c4b..30f5e71 100644
--- a/results/irm/irm_gate_coverage.csv
+++ b/results/irm/irm_gate_coverage.csv
@@ -1,9 +1,15 @@
Learner g,Learner m,level,Coverage,CI Length,Bias,Uniform Coverage,Uniform CI Length,repetition
-LGBM,LGBM,0.9,0.941,2.1611526373873855,0.48511146422993706,1.0,5.063646757352436,1000
-LGBM,LGBM,0.95,0.9766666666666667,2.5751722007164304,0.48511146422993706,1.0,5.085406768554818,1000
-LGBM,Logistic Regression,0.9,0.916,0.3904280687581478,0.08854865701618204,0.997,0.9186129412168422,1000
-LGBM,Logistic Regression,0.95,0.9606666666666667,0.4652237383199531,0.08854865701618204,0.998,0.9197763724365766,1000
-Lasso,LGBM,0.9,0.9043333333333333,2.047596498760145,0.49693265710872336,1.0,4.807194790243196,1000
-Lasso,LGBM,0.95,0.959,2.43986171576749,0.49693265710872336,1.0,4.819825928994352,1000
-Lasso,Logistic Regression,0.9,0.9173333333333333,0.400967167206407,0.08997238623406502,0.999,0.9416356317140329,1000
-Lasso,Logistic Regression,0.95,0.9603333333333334,0.4777818486889552,0.08997238623406502,0.999,0.9403831822862944,1000
+LGBM Regr.,LGBM Clas.,0.9,0.9346666666666666,0.8364542894818474,0.18422904419208913,1.0,1.9724400757209293,1000
+LGBM Regr.,LGBM Clas.,0.95,0.9756666666666667,0.9966967608764793,0.18422904419208913,1.0,1.9651735219350885,1000
+LGBM Regr.,Logistic,0.9,0.9006666666666666,0.40046180389774216,0.09740590411838057,0.998,0.9434012510097336,1000
+LGBM Regr.,Logistic,0.95,0.9523333333333334,0.4771796711651552,0.09740590411838057,0.999,0.9366053028101997,1000
+Linear,LGBM Clas.,0.9,0.9226666666666666,0.8421150432332748,0.1918698759485984,1.0,1.9758022948217957,1000
+Linear,LGBM Clas.,0.95,0.9686666666666667,1.003441965006716,0.1918698759485984,1.0,1.9855094815511516,1000
+Linear,Logistic,0.9,0.9123333333333333,0.41818791810731526,0.09904291484604033,1.0,0.9842536479155779,1000
+Linear,Logistic,0.95,0.951,0.4983016390213454,0.09904291484604033,1.0,0.985193431203212,1000
+Linear,RF Clas.,0.9,0.9166666666666666,0.44173892078977606,0.10153218721035738,1.0,1.0388647556648747,1000
+Linear,RF Clas.,0.95,0.9593333333333334,0.5263643895914247,0.10153218721035738,1.0,1.0383096121913078,1000
+RF Regr.,Logistic,0.9,0.9026666666666666,0.4004544456677431,0.0967060927359184,1.0,0.9427533643825874,1000
+RF Regr.,Logistic,0.95,0.9486666666666667,0.4771709032933203,0.0967060927359184,0.999,0.9365571482746528,1000
+RF Regr.,RF Clas.,0.9,0.9026666666666666,0.4211186636375361,0.10090471591950194,1.0,0.9859887811490382,1000
+RF Regr.,RF Clas.,0.95,0.9506666666666667,0.5017938377148734,0.10090471591950194,1.0,0.9865161484854005,1000
diff --git a/results/irm/irm_gate_coverage_metadata.csv b/results/irm/irm_gate_coverage_metadata.csv
deleted file mode 100644
index 00fc71a..0000000
--- a/results/irm/irm_gate_coverage_metadata.csv
+++ /dev/null
@@ -1,2 +0,0 @@
-DoubleML Version,Script,Date,Total Runtime (seconds),Python Version
-0.10.dev0,irm_gate_coverage.py,2025-05-22 12:06:46,2053.3285892009735,3.12.10
diff --git a/results/irm/irm_gate_metadata.csv b/results/irm/irm_gate_metadata.csv
new file mode 100644
index 0000000..b66fe1e
--- /dev/null
+++ b/results/irm/irm_gate_metadata.csv
@@ -0,0 +1,2 @@
+DoubleML Version,Script,Date,Total Runtime (minutes),Python Version,Config File
+0.11.dev0,IRMGATECoverageSimulation,2025-06-05 13:55,79.23283307154973,3.12.3,scripts/irm/irm_gate_config.yml
diff --git a/results/irm/lpq_Y0_coverage.csv b/results/irm/lpq_Y0_coverage.csv
new file mode 100644
index 0000000..fa7c0a3
--- /dev/null
+++ b/results/irm/lpq_Y0_coverage.csv
@@ -0,0 +1,9 @@
+Learner g,Learner m,level,Coverage,CI Length,Bias,repetition
+LGBM Clas.,LGBM Clas.,0.9,0.938,1.182292560755257,0.23325691846991042,200
+LGBM Clas.,LGBM Clas.,0.95,0.9690000000000001,1.4087884783794826,0.23325691846991042,200
+LGBM Clas.,Logistic,0.9,0.9390000000000001,1.137086069984906,0.22432594978342146,200
+LGBM Clas.,Logistic,0.95,0.9690000000000001,1.3549216221890352,0.22432594978342146,200
+Logistic,LGBM Clas.,0.9,0.938,1.1527775627918269,0.22374215890669022,200
+Logistic,LGBM Clas.,0.95,0.9690000000000001,1.3736191891100717,0.22374215890669022,200
+Logistic,Logistic,0.9,0.943,1.111906655099774,0.2212690310065874,200
+Logistic,Logistic,0.95,0.9690000000000001,1.3249184987998035,0.2212690310065874,200
diff --git a/results/irm/lpq_Y1_coverage.csv b/results/irm/lpq_Y1_coverage.csv
new file mode 100644
index 0000000..ba4fa63
--- /dev/null
+++ b/results/irm/lpq_Y1_coverage.csv
@@ -0,0 +1,9 @@
+Learner g,Learner m,level,Coverage,CI Length,Bias,repetition
+LGBM Clas.,LGBM Clas.,0.9,0.946,1.6296637808266206,0.31922961803439465,200
+LGBM Clas.,LGBM Clas.,0.95,0.965,1.9418641665090766,0.31922961803439465,200
+LGBM Clas.,Logistic,0.9,0.94,1.5840129690335032,0.3094090506782375,200
+LGBM Clas.,Logistic,0.95,0.97,1.8874678691647622,0.3094090506782375,200
+Logistic,LGBM Clas.,0.9,0.93,1.5829510778239204,0.31056212030323144,200
+Logistic,LGBM Clas.,0.95,0.965,1.8862025477451665,0.31056212030323144,200
+Logistic,Logistic,0.9,0.941,1.5420148214413294,0.2867899782486625,200
+Logistic,Logistic,0.95,0.97,1.8374239896673397,0.2867899782486625,200
diff --git a/results/irm/lpq_config.yml b/results/irm/lpq_config.yml
new file mode 100644
index 0000000..85abd3f
--- /dev/null
+++ b/results/irm/lpq_config.yml
@@ -0,0 +1,48 @@
+simulation_parameters:
+ repetitions: 200
+ max_runtime: 19800
+ random_seed: 42
+ n_jobs: -2
+dgp_parameters:
+ n_obs:
+ - 5000
+ dim_x:
+ - 5
+learner_definitions:
+ logit: &id001
+ name: Logistic
+ lgbmc: &id002
+ name: LGBM Clas.
+ params:
+ n_estimators: 200
+ learning_rate: 0.05
+ num_leaves: 15
+ max_depth: 5
+ min_child_samples: 10
+ subsample: 0.9
+ colsample_bytree: 0.9
+ reg_alpha: 0.0
+ reg_lambda: 0.1
+ random_state: 42
+dml_parameters:
+ tau_vec:
+ - - 0.3
+ - 0.4
+ - 0.5
+ - 0.6
+ - 0.7
+ trimming_threshold:
+ - 0.01
+ learners:
+ - ml_g: *id001
+ ml_m: *id001
+ - ml_g: *id002
+ ml_m: *id002
+ - ml_g: *id002
+ ml_m: *id001
+ - ml_g: *id001
+ ml_m: *id002
+confidence_parameters:
+ level:
+ - 0.95
+ - 0.9
diff --git a/results/irm/lpq_coverage_lpq0.csv b/results/irm/lpq_coverage_lpq0.csv
deleted file mode 100644
index d563335..0000000
--- a/results/irm/lpq_coverage_lpq0.csv
+++ /dev/null
@@ -1,9 +0,0 @@
-Learner g,Learner m,level,Coverage,CI Length,Bias,repetition
-LGBM,LGBM,0.9,0.94,1.1717023704378204,0.23180102735661154,100
-LGBM,LGBM,0.95,0.9722222222222223,1.396169488293374,0.23180102735661154,100
-LGBM,Logistic Regression,0.9,0.95,1.1248311184133066,0.2270596212748565,100
-LGBM,Logistic Regression,0.95,0.9755555555555556,1.3403189467174592,0.2270596212748565,100
-Logistic Regression,LGBM,0.9,0.9488888888888889,1.1516756381210271,0.22097455311738945,100
-Logistic Regression,LGBM,0.95,0.9811111111111112,1.372306164879188,0.22097455311738945,100
-Logistic Regression,Logistic Regression,0.9,0.9533333333333333,1.1095156391492575,0.20619468880954653,100
-Logistic Regression,Logistic Regression,0.95,0.9788888888888888,1.3220694275677582,0.20619468880954653,100
diff --git a/results/irm/lpq_coverage_lpq1.csv b/results/irm/lpq_coverage_lpq1.csv
deleted file mode 100644
index 524d87a..0000000
--- a/results/irm/lpq_coverage_lpq1.csv
+++ /dev/null
@@ -1,9 +0,0 @@
-Learner g,Learner m,level,Coverage,CI Length,Bias,repetition
-LGBM,LGBM,0.9,0.9577777777777777,1.6624989751251327,0.3276833414641952,100
-LGBM,LGBM,0.95,0.99,1.9809897137285786,0.3276833414641952,100
-LGBM,Logistic Regression,0.9,0.9411111111111111,1.5914659538910119,0.32194623632362507,100
-LGBM,Logistic Regression,0.95,0.9711111111111111,1.8963486483773861,0.32194623632362507,100
-Logistic Regression,LGBM,0.9,0.96,1.60784858202307,0.30083496643464896,100
-Logistic Regression,LGBM,0.95,0.9822222222222223,1.915869753833108,0.30083496643464896,100
-Logistic Regression,Logistic Regression,0.9,0.9466666666666668,1.556189836416658,0.304958909712274,100
-Logistic Regression,Logistic Regression,0.95,0.9844444444444445,1.8543145617989472,0.304958909712274,100
diff --git a/results/irm/lpq_coverage_lqte.csv b/results/irm/lpq_coverage_lqte.csv
deleted file mode 100644
index 2e788da..0000000
--- a/results/irm/lpq_coverage_lqte.csv
+++ /dev/null
@@ -1,9 +0,0 @@
-Learner g,Learner m,level,Coverage,CI Length,Bias,Uniform Coverage,Uniform CI Length,repetition
-LGBM,LGBM,0.9,0.9233333333333333,1.636872420792144,0.38490701730912247,0.94,2.2547716211801454,100
-LGBM,LGBM,0.95,0.9622222222222223,1.950453790824845,0.38490701730912247,0.97,2.5400452907969764,100
-LGBM,Logistic Regression,0.9,0.9088888888888889,1.5675334432220174,0.37520599979698516,0.91,2.1574364137437168,100
-LGBM,Logistic Regression,0.95,0.95,1.8678313030025362,0.37520599979698516,0.95,2.4273297337432616,100
-Logistic Regression,LGBM,0.9,0.9188888888888889,1.6190226009242576,0.3723322348549169,0.92,2.208051172256097,100
-Logistic Regression,LGBM,0.95,0.9644444444444444,1.9291844185850637,0.3723322348549169,0.98,2.487384422889672,100
-Logistic Regression,Logistic Regression,0.9,0.9066666666666667,1.5642497785562741,0.3720685867269132,0.9,2.1368366984439215,100
-Logistic Regression,Logistic Regression,0.95,0.9477777777777777,1.8639185752213465,0.3720685867269132,0.95,2.4083334749081065,100
diff --git a/results/irm/lpq_coverage_metadata.csv b/results/irm/lpq_coverage_metadata.csv
deleted file mode 100644
index 53aaefa..0000000
--- a/results/irm/lpq_coverage_metadata.csv
+++ /dev/null
@@ -1,2 +0,0 @@
-DoubleML Version,Script,Date,Total Runtime (seconds),Python Version
-0.10.dev0,lpq_coverage.py,2025-05-22 16:36:53,18251.836868286133,3.12.10
diff --git a/results/irm/lpq_effect_coverage.csv b/results/irm/lpq_effect_coverage.csv
new file mode 100644
index 0000000..2e1488a
--- /dev/null
+++ b/results/irm/lpq_effect_coverage.csv
@@ -0,0 +1,9 @@
+Learner g,Learner m,level,Coverage,CI Length,Bias,Uniform Coverage,Uniform CI Length,repetition
+LGBM Clas.,LGBM Clas.,0.9,0.882,1.6179566817511408,0.38892858697985444,0.85,2.134204151727998,200
+LGBM Clas.,LGBM Clas.,0.95,0.9329999999999999,1.9279142975508834,0.38892858697985444,0.93,2.4114959415166863,200
+LGBM Clas.,Logistic,0.9,0.907,1.57231832862624,0.36746763608272737,0.865,2.0750759388885753,200
+LGBM Clas.,Logistic,0.95,0.9520000000000001,1.873532845625395,0.36746763608272737,0.935,2.3491301230114727,200
+Logistic,LGBM Clas.,0.9,0.892,1.5819115069451675,0.37355342356664595,0.835,2.0754682406547134,200
+Logistic,LGBM Clas.,0.95,0.943,1.8849638226401801,0.37355342356664595,0.93,2.3505474278366396,200
+Logistic,Logistic,0.9,0.895,1.5376032362171548,0.3646953928818029,0.86,2.0170674200183667,200
+Logistic,Logistic,0.95,0.941,1.8321672616445936,0.3646953928818029,0.91,2.2852783686545495,200
diff --git a/results/irm/lpq_metadata.csv b/results/irm/lpq_metadata.csv
new file mode 100644
index 0000000..47bab20
--- /dev/null
+++ b/results/irm/lpq_metadata.csv
@@ -0,0 +1,2 @@
+DoubleML Version,Script,Date,Total Runtime (minutes),Python Version,Config File
+0.11.dev0,LPQCoverageSimulation,2025-06-05 14:29,112.94002043803533,3.12.3,scripts/irm/lpq_config.yml
diff --git a/results/irm/pq_Y0_coverage.csv b/results/irm/pq_Y0_coverage.csv
new file mode 100644
index 0000000..ff0b3ac
--- /dev/null
+++ b/results/irm/pq_Y0_coverage.csv
@@ -0,0 +1,9 @@
+Learner g,Learner m,level,Coverage,CI Length,Bias,repetition
+LGBM Clas.,LGBM Clas.,0.9,0.8835714285714286,0.5723701824339644,0.14549176231056815,200
+LGBM Clas.,LGBM Clas.,0.95,0.9378571428571427,0.6820211385461397,0.14549176231056815,200
+LGBM Clas.,Logistic,0.9,0.84,0.4044754891818755,0.1130627263596336,200
+LGBM Clas.,Logistic,0.95,0.9078571428571429,0.4819622721658046,0.1130627263596336,200
+Logistic,LGBM Clas.,0.9,0.8878571428571429,0.5701038626834825,0.14084059793538922,200
+Logistic,LGBM Clas.,0.95,0.9328571428571429,0.6793206520009456,0.14084059793538922,200
+Logistic,Logistic,0.9,0.8521428571428571,0.40381464298983716,0.10742954627392248,200
+Logistic,Logistic,0.95,0.9207142857142857,0.4811748253592969,0.10742954627392248,200
diff --git a/results/irm/pq_Y1_coverage.csv b/results/irm/pq_Y1_coverage.csv
new file mode 100644
index 0000000..3cb5336
--- /dev/null
+++ b/results/irm/pq_Y1_coverage.csv
@@ -0,0 +1,9 @@
+Learner g,Learner m,level,Coverage,CI Length,Bias,repetition
+LGBM Clas.,LGBM Clas.,0.9,0.9114285714285714,0.25322340912372693,0.05863607300930684,200
+LGBM Clas.,LGBM Clas.,0.95,0.9514285714285714,0.3017343025499488,0.05863607300930684,200
+LGBM Clas.,Logistic,0.9,0.9028571428571429,0.23575937348166215,0.057047735482004806,200
+LGBM Clas.,Logistic,0.95,0.9507142857142857,0.2809246205683309,0.057047735482004806,200
+Logistic,LGBM Clas.,0.9,0.9178571428571429,0.2536257290831553,0.0584307589677001,200
+Logistic,LGBM Clas.,0.95,0.9607142857142857,0.3022136963499933,0.0584307589677001,200
+Logistic,Logistic,0.9,0.8971428571428571,0.2359931637120258,0.05685852446847999,200
+Logistic,Logistic,0.95,0.9407142857142857,0.28120319881015254,0.05685852446847999,200
diff --git a/results/irm/pq_config.yml b/results/irm/pq_config.yml
new file mode 100644
index 0000000..e106878
--- /dev/null
+++ b/results/irm/pq_config.yml
@@ -0,0 +1,50 @@
+simulation_parameters:
+ repetitions: 200
+ max_runtime: 19800
+ random_seed: 42
+ n_jobs: -2
+dgp_parameters:
+ n_obs:
+ - 5000
+ dim_x:
+ - 5
+learner_definitions:
+ logit: &id001
+ name: Logistic
+ lgbmc: &id002
+ name: LGBM Clas.
+ params:
+ n_estimators: 200
+ learning_rate: 0.05
+ num_leaves: 15
+ max_depth: 5
+ min_child_samples: 10
+ subsample: 0.9
+ colsample_bytree: 0.9
+ reg_alpha: 0.0
+ reg_lambda: 0.1
+ random_state: 42
+dml_parameters:
+ tau_vec:
+ - - 0.2
+ - 0.3
+ - 0.4
+ - 0.5
+ - 0.6
+ - 0.7
+ - 0.8
+ trimming_threshold:
+ - 0.01
+ learners:
+ - ml_g: *id001
+ ml_m: *id001
+ - ml_g: *id002
+ ml_m: *id002
+ - ml_g: *id002
+ ml_m: *id001
+ - ml_g: *id001
+ ml_m: *id002
+confidence_parameters:
+ level:
+ - 0.95
+ - 0.9
diff --git a/results/irm/pq_coverage_metadata.csv b/results/irm/pq_coverage_metadata.csv
deleted file mode 100644
index 4074d69..0000000
--- a/results/irm/pq_coverage_metadata.csv
+++ /dev/null
@@ -1,2 +0,0 @@
-DoubleML Version,Script,Date,Total Runtime (seconds),Python Version
-0.10.dev0,pq_coverage.py,2025-05-22 16:20:49,17287.969739675522,3.12.10
diff --git a/results/irm/pq_coverage_pq0.csv b/results/irm/pq_coverage_pq0.csv
deleted file mode 100644
index e7b946a..0000000
--- a/results/irm/pq_coverage_pq0.csv
+++ /dev/null
@@ -1,9 +0,0 @@
-Learner g,Learner m,level,Coverage,CI Length,Bias,repetition
-LGBM,LGBM,0.9,0.8823076923076922,0.5804011224570385,0.14917558900311645,100
-LGBM,LGBM,0.95,0.9338461538461539,0.6915905938151775,0.14917558900311645,100
-LGBM,Logistic Regression,0.9,0.8115384615384617,0.38610406439934963,0.11426502373246265,100
-LGBM,Logistic Regression,0.95,0.8869230769230769,0.4600713693350327,0.11426502373246265,100
-Logistic Regression,LGBM,0.9,0.8869230769230769,0.5879335180942487,0.15107204460433907,100
-Logistic Regression,LGBM,0.95,0.9415384615384617,0.7005659968080872,0.15107204460433907,100
-Logistic Regression,Logistic Regression,0.9,0.8223076923076923,0.3894908707564202,0.11249469872415391,100
-Logistic Regression,Logistic Regression,0.95,0.9015384615384616,0.4641069980218067,0.11249469872415391,100
diff --git a/results/irm/pq_coverage_pq1.csv b/results/irm/pq_coverage_pq1.csv
deleted file mode 100644
index ab97622..0000000
--- a/results/irm/pq_coverage_pq1.csv
+++ /dev/null
@@ -1,9 +0,0 @@
-Learner g,Learner m,level,Coverage,CI Length,Bias,repetition
-LGBM,LGBM,0.9,0.9153846153846155,0.2504138111093655,0.058637832346116046,100
-LGBM,LGBM,0.95,0.9623076923076923,0.298386460025268,0.058637832346116046,100
-LGBM,Logistic Regression,0.9,0.9138461538461539,0.2294851617480571,0.053811638122637194,100
-LGBM,Logistic Regression,0.95,0.9623076923076923,0.273448436166418,0.053811638122637194,100
-Logistic Regression,LGBM,0.9,0.9192307692307692,0.25410039964172587,0.059350250451121786,100
-Logistic Regression,LGBM,0.95,0.9607692307692308,0.3027793012063014,0.059350250451121786,100
-Logistic Regression,Logistic Regression,0.9,0.8976923076923078,0.23093621538720735,0.05722710381019568,100
-Logistic Regression,Logistic Regression,0.95,0.9538461538461539,0.2751774732222205,0.05722710381019568,100
diff --git a/results/irm/pq_coverage_qte.csv b/results/irm/pq_coverage_qte.csv
deleted file mode 100644
index c1c85e9..0000000
--- a/results/irm/pq_coverage_qte.csv
+++ /dev/null
@@ -1,9 +0,0 @@
-Learner g,Learner m,level,Coverage,CI Length,Bias,Uniform Coverage,Uniform CI Length,repetition
-LGBM,LGBM,0.9,0.8823076923076922,0.6176207568284253,0.15519922876616388,0.84,0.9336715877443045,100
-LGBM,LGBM,0.95,0.9392307692307692,0.7359405236146267,0.15519922876616388,0.91,1.0306538899948843,100
-LGBM,Logistic Regression,0.9,0.8261538461538461,0.4257458252181111,0.1226898281791693,0.78,0.6459797086090959,100
-LGBM,Logistic Regression,0.95,0.9107692307692308,0.5073074408099907,0.1226898281791693,0.84,0.7142375039945987,100
-Logistic Regression,LGBM,0.9,0.8930769230769231,0.6286198118927271,0.1592095091097193,0.87,0.9308233463178209,100
-Logistic Regression,LGBM,0.95,0.9476923076923077,0.7490467060960181,0.1592095091097193,0.92,1.029300136308572,100
-Logistic Regression,Logistic Regression,0.9,0.8415384615384616,0.43446715078562453,0.12422823365772839,0.8,0.6474624649583857,100
-Logistic Regression,Logistic Regression,0.95,0.9123076923076923,0.5176995411949078,0.12422823365772839,0.86,0.719846486999326,100
diff --git a/results/irm/pq_effect_coverage.csv b/results/irm/pq_effect_coverage.csv
new file mode 100644
index 0000000..710de75
--- /dev/null
+++ b/results/irm/pq_effect_coverage.csv
@@ -0,0 +1,9 @@
+Learner g,Learner m,level,Coverage,CI Length,Bias,Uniform Coverage,Uniform CI Length,repetition
+LGBM Clas.,LGBM Clas.,0.9,0.8857142857142857,0.6119091533344946,0.15466123618513827,0.83,0.87431953800783,200
+LGBM Clas.,LGBM Clas.,0.95,0.9392857142857143,0.7291347282790106,0.15466123618513827,0.895,0.9724716396352252,200
+LGBM Clas.,Logistic,0.9,0.8414285714285714,0.44675957755153617,0.12877194950989979,0.725,0.6418465368141598,200
+LGBM Clas.,Logistic,0.95,0.9028571428571429,0.5323468711147352,0.12877194950989979,0.835,0.7132416415357535,200
+Logistic,LGBM Clas.,0.9,0.89,0.6129040235808955,0.15303926661598033,0.83,0.8645516927626398,200
+Logistic,LGBM Clas.,0.95,0.94,0.7303201892952897,0.15303926661598033,0.88,0.9644960349693998,200
+Logistic,Logistic,0.9,0.8592857142857143,0.45040188642196527,0.12179153217631539,0.785,0.6382066580620731,200
+Logistic,Logistic,0.95,0.925,0.536686949824257,0.12179153217631539,0.865,0.7120923308029136,200
diff --git a/results/irm/pq_metadata.csv b/results/irm/pq_metadata.csv
new file mode 100644
index 0000000..bf12575
--- /dev/null
+++ b/results/irm/pq_metadata.csv
@@ -0,0 +1,2 @@
+DoubleML Version,Script,Date,Total Runtime (minutes),Python Version,Config File
+0.11.dev0,PQCoverageSimulation,2025-06-05 14:33,117.12256911595662,3.12.3,scripts/irm/pq_config.yml
diff --git a/results/irm/ssm_mar_ate_coverage.csv b/results/irm/ssm_mar_ate_coverage.csv
deleted file mode 100644
index 1bb3c80..0000000
--- a/results/irm/ssm_mar_ate_coverage.csv
+++ /dev/null
@@ -1,17 +0,0 @@
-Learner g,Learner m,Learner pi,score,level,Coverage,CI Length,Bias,repetition
-LGBM,LGBM,LGBM,missing-at-random,0.9,0.934,5.894613599209654,1.524603484598883,1000
-LGBM,LGBM,LGBM,missing-at-random,0.95,0.981,7.023865326329001,1.524603484598883,1000
-LGBM,LGBM,Logistic,missing-at-random,0.9,0.927,2.581588867556288,0.6151192173080174,1000
-LGBM,LGBM,Logistic,missing-at-random,0.95,0.973,3.0761528687981836,0.6151192173080174,1000
-LGBM,Logistic,LGBM,missing-at-random,0.9,0.945,2.5756414334122413,0.6542029876050892,1000
-LGBM,Logistic,LGBM,missing-at-random,0.95,0.985,3.0690660639106513,0.6542029876050892,1000
-LGBM,Logistic,Logistic,missing-at-random,0.9,0.914,0.5399737109966929,0.12672360617482703,1000
-LGBM,Logistic,Logistic,missing-at-random,0.95,0.958,0.6434183618596122,0.12672360617482703,1000
-Lasso,LGBM,LGBM,missing-at-random,0.9,0.939,5.030966633759897,1.2700343898139361,1000
-Lasso,LGBM,LGBM,missing-at-random,0.95,0.981,5.994766493519136,1.2700343898139361,1000
-Lasso,LGBM,Logistic,missing-at-random,0.9,0.887,2.3414252826432578,0.6221402191258447,1000
-Lasso,LGBM,Logistic,missing-at-random,0.95,0.955,2.78998030662317,0.6221402191258447,1000
-Lasso,Logistic,LGBM,missing-at-random,0.9,0.919,2.2995632695758177,0.6130089902400226,1000
-Lasso,Logistic,LGBM,missing-at-random,0.95,0.97,2.7400986414171338,0.6130089902400226,1000
-Lasso,Logistic,Logistic,missing-at-random,0.9,0.897,0.5116626905293399,0.12267262949094253,1000
-Lasso,Logistic,Logistic,missing-at-random,0.95,0.961,0.6096837002627445,0.12267262949094253,1000
diff --git a/results/irm/ssm_mar_ate_coverage_metadata.csv b/results/irm/ssm_mar_ate_coverage_metadata.csv
deleted file mode 100644
index 1653aa7..0000000
--- a/results/irm/ssm_mar_ate_coverage_metadata.csv
+++ /dev/null
@@ -1,2 +0,0 @@
-DoubleML Version,Script,Date,Total Runtime (seconds),Python Version
-0.10.dev0,ssm_mar_ate_coverage.py,2025-05-22 12:57:04,5067.488474369049,3.12.10
diff --git a/results/irm/ssm_nonignorable_ate_coverage.csv b/results/irm/ssm_nonignorable_ate_coverage.csv
deleted file mode 100644
index 7ba36c8..0000000
--- a/results/irm/ssm_nonignorable_ate_coverage.csv
+++ /dev/null
@@ -1,17 +0,0 @@
-Learner g,Learner m,Learner pi,score,level,Coverage,CI Length,Bias,repetition
-LGBM,LGBM,LGBM,nonignorable,0.9,0.919,13.007296321798503,3.6668819438868456,1000
-LGBM,LGBM,LGBM,nonignorable,0.95,0.966,15.499149534791721,3.6668819438868456,1000
-LGBM,LGBM,Logistic,nonignorable,0.9,0.918,4.832148171492935,1.326909933210586,1000
-LGBM,LGBM,Logistic,nonignorable,0.95,0.974,5.757859683624388,1.326909933210586,1000
-LGBM,Logistic,LGBM,nonignorable,0.9,0.897,4.592630929679517,1.2319933070953262,1000
-LGBM,Logistic,LGBM,nonignorable,0.95,0.959,5.472457286755355,1.2319933070953262,1000
-LGBM,Logistic,Logistic,nonignorable,0.9,0.867,2.5301811727355124,0.727808897144389,1000
-LGBM,Logistic,Logistic,nonignorable,0.95,0.94,3.014896822226011,0.727808897144389,1000
-Lasso,LGBM,LGBM,nonignorable,0.9,0.931,10.203902842326483,2.892343371671614,1000
-Lasso,LGBM,LGBM,nonignorable,0.95,0.975,12.158700169432056,2.892343371671614,1000
-Lasso,LGBM,Logistic,nonignorable,0.9,0.917,7.0625497859220765,2.0505032981783877,1000
-Lasso,LGBM,Logistic,nonignorable,0.95,0.974,8.415547129919013,2.0505032981783877,1000
-Lasso,Logistic,LGBM,nonignorable,0.9,0.894,4.170596833405628,1.1867597841647146,1000
-Lasso,Logistic,LGBM,nonignorable,0.95,0.95,4.969572643774818,1.1867597841647146,1000
-Lasso,Logistic,Logistic,nonignorable,0.9,0.87,1.993316632836819,0.5662899386624597,1000
-Lasso,Logistic,Logistic,nonignorable,0.95,0.928,2.375183266237269,0.5662899386624597,1000
diff --git a/results/irm/ssm_nonignorable_ate_coverage_metadata.csv b/results/irm/ssm_nonignorable_ate_coverage_metadata.csv
deleted file mode 100644
index cd143b6..0000000
--- a/results/irm/ssm_nonignorable_ate_coverage_metadata.csv
+++ /dev/null
@@ -1,2 +0,0 @@
-DoubleML Version,Script,Date,Total Runtime (seconds),Python Version
-0.10.dev0,ssm_nonignorable_ate_coverage.py,2025-05-22 13:29:53,7036.344848871231,3.12.10
diff --git a/results/plm/pliv_late_config.yml b/results/plm/pliv_late_config.yml
new file mode 100644
index 0000000..9863dcf
--- /dev/null
+++ b/results/plm/pliv_late_config.yml
@@ -0,0 +1,57 @@
+simulation_parameters:
+ repetitions: 1000
+ max_runtime: 19800
+ random_seed: 42
+ n_jobs: -2
+dgp_parameters:
+ theta:
+ - 0.5
+ n_obs:
+ - 500
+ dim_x:
+ - 20
+ dim_z:
+ - 1
+learner_definitions:
+ lasso: &id001
+ name: LassoCV
+ rf: &id002
+ name: RF Regr.
+ params:
+ n_estimators: 200
+ max_features: 20
+ max_depth: 5
+ min_samples_leaf: 2
+dml_parameters:
+ learners:
+ - ml_g: *id001
+ ml_m: *id001
+ ml_r: *id001
+ - ml_g: *id002
+ ml_m: *id002
+ ml_r: *id002
+ - ml_g: *id001
+ ml_m: *id002
+ ml_r: *id002
+ - ml_g: *id002
+ ml_m: *id001
+ ml_r: *id002
+ - ml_g: *id002
+ ml_m: *id002
+ ml_r: *id001
+ - ml_g: *id001
+ ml_m: *id001
+ ml_r: *id002
+ - ml_g: *id002
+ ml_m: *id001
+ ml_r: *id001
+ - ml_g: *id001
+ ml_m: *id002
+ ml_r: *id001
+ score:
+ - partialling out
+ - IV-type
+confidence_parameters:
+ level:
+ - 0.95
+ - 0.9
diff --git a/results/plm/pliv_late_coverage.csv b/results/plm/pliv_late_coverage.csv
index 2050685..1a876b3 100644
--- a/results/plm/pliv_late_coverage.csv
+++ b/results/plm/pliv_late_coverage.csv
@@ -1,33 +1,33 @@
-Learner g,Learner m,Learner r,score,level,Coverage,CI Length,Bias,repetition
-Lasso,Lasso,Lasso,IV-type,0.9,0.806,0.23181691237656843,0.071516051616331,1000
-Lasso,Lasso,Lasso,IV-type,0.95,0.873,0.2762268884116072,0.071516051616331,1000
-Lasso,Lasso,Lasso,partialling out,0.9,0.899,0.2994589442321557,0.07151348023026591,1000
-Lasso,Lasso,Lasso,partialling out,0.95,0.947,0.35682734069852273,0.07151348023026591,1000
-Lasso,Lasso,Random Forest,IV-type,0.9,0.809,0.23192657104746503,0.07186817392341269,1000
-Lasso,Lasso,Random Forest,IV-type,0.95,0.864,0.27635755477731183,0.07186817392341269,1000
-Lasso,Lasso,Random Forest,partialling out,0.9,0.899,0.3069995989366334,0.07382308081474215,1000
-Lasso,Lasso,Random Forest,partialling out,0.95,0.947,0.36581258497707936,0.07382308081474215,1000
-Lasso,Random Forest,Lasso,IV-type,0.9,0.827,0.2641841657851808,0.0774047144241511,1000
-Lasso,Random Forest,Lasso,IV-type,0.95,0.901,0.3147948496696171,0.0774047144241511,1000
-Lasso,Random Forest,Lasso,partialling out,0.9,0.901,0.31707874099555505,0.07724285844476118,1000
-Lasso,Random Forest,Lasso,partialling out,0.95,0.953,0.37782262350382784,0.07724285844476118,1000
-Lasso,Random Forest,Random Forest,IV-type,0.9,0.838,0.2649779671547556,0.07652770804553702,1000
-Lasso,Random Forest,Random Forest,IV-type,0.95,0.908,0.3157407223416607,0.07652770804553702,1000
-Lasso,Random Forest,Random Forest,partialling out,0.9,0.874,0.3008906023582805,0.08181190413005493,1000
-Lasso,Random Forest,Random Forest,partialling out,0.95,0.942,0.35853326657508794,0.08181190413005493,1000
-Random Forest,Lasso,Lasso,IV-type,0.9,0.792,0.24230370043873264,0.07631671962244177,1000
-Random Forest,Lasso,Lasso,IV-type,0.95,0.872,0.28872266711104083,0.07631671962244177,1000
-Random Forest,Lasso,Lasso,partialling out,0.9,0.898,0.3308843211589107,0.08053193469589409,1000
-Random Forest,Lasso,Lasso,partialling out,0.95,0.956,0.39427298690546814,0.08053193469589409,1000
-Random Forest,Lasso,Random Forest,IV-type,0.9,0.801,0.24168635877812236,0.07483257666340414,1000
-Random Forest,Lasso,Random Forest,IV-type,0.95,0.877,0.2879870591510822,0.07483257666340414,1000
-Random Forest,Lasso,Random Forest,partialling out,0.9,0.912,0.31921209806412515,0.07628803083364795,1000
-Random Forest,Lasso,Random Forest,partialling out,0.95,0.959,0.38036467524147144,0.07628803083364795,1000
-Random Forest,Random Forest,Lasso,IV-type,0.9,0.822,0.2782151587448532,0.08043790541094972,1000
-Random Forest,Random Forest,Lasso,IV-type,0.95,0.888,0.331513808984715,0.08043790541094972,1000
-Random Forest,Random Forest,Lasso,partialling out,0.9,0.782,0.35119060752265596,0.10740217708024272,1000
-Random Forest,Random Forest,Lasso,partialling out,0.95,0.859,0.4184694195123385,0.10740217708024272,1000
-Random Forest,Random Forest,Random Forest,IV-type,0.9,0.826,0.27438186022140504,0.08018818357971656,1000
-Random Forest,Random Forest,Random Forest,IV-type,0.95,0.889,0.32694615206689337,0.08018818357971656,1000
-Random Forest,Random Forest,Random Forest,partialling out,0.9,0.863,0.3038242308513539,0.07846342744816766,1000
-Random Forest,Random Forest,Random Forest,partialling out,0.95,0.926,0.3620289005307371,0.07846342744816766,1000
+Learner g,Learner m,Learner r,Score,level,Coverage,CI Length,Bias,repetition
+LassoCV,LassoCV,LassoCV,IV-type,0.9,0.7993019197207679,0.23180266726620893,0.07317965760520444,573
+LassoCV,LassoCV,LassoCV,IV-type,0.95,0.8603839441535777,0.27620991431567365,0.07317965760520444,573
+LassoCV,LassoCV,LassoCV,partialling out,0.9,0.8795811518324608,0.3005968104977072,0.07254739125260629,573
+LassoCV,LassoCV,LassoCV,partialling out,0.95,0.9493891797556719,0.3581831919810696,0.07254739125260629,573
+LassoCV,LassoCV,RF Regr.,IV-type,0.9,0.806282722513089,0.23262465528448026,0.07326064389971966,573
+LassoCV,LassoCV,RF Regr.,IV-type,0.95,0.8638743455497382,0.27718937345120853,0.07326064389971966,573
+LassoCV,LassoCV,RF Regr.,partialling out,0.9,0.8848167539267016,0.3081195890287047,0.07427328983504944,573
+LassoCV,LassoCV,RF Regr.,partialling out,0.95,0.9581151832460733,0.3671471354851205,0.07427328983504944,573
+LassoCV,RF Regr.,LassoCV,IV-type,0.9,0.8132635253054101,0.2659516317640923,0.07903719894266115,573
+LassoCV,RF Regr.,LassoCV,IV-type,0.95,0.8900523560209425,0.31690091528287584,0.07903719894266115,573
+LassoCV,RF Regr.,LassoCV,partialling out,0.9,0.8830715532286213,0.3186854967173057,0.07809285377225214,573
+LassoCV,RF Regr.,LassoCV,partialling out,0.95,0.9476439790575916,0.379737191034327,0.07809285377225214,573
+LassoCV,RF Regr.,RF Regr.,IV-type,0.9,0.8254799301919721,0.2668640644386955,0.07668266567351469,573
+LassoCV,RF Regr.,RF Regr.,IV-type,0.95,0.893542757417103,0.31798814587363317,0.07668266567351469,573
+LassoCV,RF Regr.,RF Regr.,partialling out,0.9,0.8656195462478184,0.30294245283479354,0.08542062639988371,573
+LassoCV,RF Regr.,RF Regr.,partialling out,0.95,0.93717277486911,0.36097819721799285,0.08542062639988371,573
+RF Regr.,LassoCV,LassoCV,IV-type,0.9,0.7818499127399651,0.24228998675825072,0.07584491443329881,573
+RF Regr.,LassoCV,LassoCV,IV-type,0.95,0.8586387434554974,0.28870632625286374,0.07584491443329881,573
+RF Regr.,LassoCV,LassoCV,partialling out,0.9,0.8970331588132635,0.3318879094683072,0.0806654890480541,573
+RF Regr.,LassoCV,LassoCV,partialling out,0.95,0.9406631762652705,0.3954688361345379,0.0806654890480541,573
+RF Regr.,LassoCV,RF Regr.,IV-type,0.9,0.8027923211169284,0.241628366516599,0.07544233872660495,573
+RF Regr.,LassoCV,RF Regr.,IV-type,0.95,0.8673647469458988,0.28791795710935314,0.07544233872660495,573
+RF Regr.,LassoCV,RF Regr.,partialling out,0.9,0.8952879581151832,0.32006834738062506,0.07708638562807664,573
+RF Regr.,LassoCV,RF Regr.,partialling out,0.95,0.9476439790575916,0.3813849592318696,0.07708638562807664,573
+RF Regr.,RF Regr.,LassoCV,IV-type,0.9,0.8184991273996509,0.2791009597646858,0.08075894727233103,573
+RF Regr.,RF Regr.,LassoCV,IV-type,0.95,0.8848167539267016,0.3325693060015278,0.08075894727233103,573
+RF Regr.,RF Regr.,LassoCV,partialling out,0.9,0.806282722513089,0.35157995852822205,0.10562690570040405,573
+RF Regr.,RF Regr.,LassoCV,partialling out,0.95,0.8726003490401396,0.4189333598507066,0.10562690570040405,573
+RF Regr.,RF Regr.,RF Regr.,IV-type,0.9,0.8359511343804538,0.2769681338448809,0.07840964131824099,573
+RF Regr.,RF Regr.,RF Regr.,IV-type,0.95,0.8830715532286213,0.33002788716667447,0.07840964131824099,573
+RF Regr.,RF Regr.,RF Regr.,partialling out,0.9,0.8534031413612565,0.304252712059565,0.07977048559824199,573
+RF Regr.,RF Regr.,RF Regr.,partialling out,0.95,0.9179755671902269,0.36253946738141946,0.07977048559824199,573
diff --git a/results/plm/pliv_late_coverage_metadata.csv b/results/plm/pliv_late_coverage_metadata.csv
deleted file mode 100644
index 7b97bb6..0000000
--- a/results/plm/pliv_late_coverage_metadata.csv
+++ /dev/null
@@ -1,2 +0,0 @@
-DoubleML Version,Script,Date,Total Runtime (seconds),Python Version
-0.10.dev0,pliv_late_coverage.py,2025-05-22 16:55:09,19353.43654370308,3.12.10
diff --git a/results/plm/pliv_late_metadata.csv b/results/plm/pliv_late_metadata.csv
new file mode 100644
index 0000000..18f9cba
--- /dev/null
+++ b/results/plm/pliv_late_metadata.csv
@@ -0,0 +1,2 @@
+DoubleML Version,Script,Date,Total Runtime (minutes),Python Version,Config File
+0.11.dev0,PLIVLATECoverageSimulation,2025-06-05 18:09,333.49471075932183,3.12.3,scripts/plm/pliv_late_config.yml
diff --git a/results/plm/plr_ate_config.yml b/results/plm/plr_ate_config.yml
index af73b18..d504ba6 100644
--- a/results/plm/plr_ate_config.yml
+++ b/results/plm/plr_ate_config.yml
@@ -1,386 +1,53 @@
-confidence_parameters:
- level:
- - 0.95
- - 0.9
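+# Learners are defined once as compact name/params entries and shared across
+# the ml_g/ml_m grid via YAML anchors (&id001-&id003).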
+simulation_parameters:
+ repetitions: 1000
+ max_runtime: 19800
+ random_seed: 42
+ n_jobs: -2
dgp_parameters:
- dim_x:
- - 20
- n_obs:
- - 500
theta:
- 0.5
-dml_parameters:
- learners:
- - ml_g: !!python/tuple
- - Lasso
- - !!python/object:sklearn.linear_model._coordinate_descent.LassoCV
- _sklearn_version: 1.5.2
- alphas: null
- copy_X: true
- cv: null
- eps: 0.001
- fit_intercept: true
- max_iter: 1000
- n_alphas: 100
- n_jobs: null
- positive: false
- precompute: auto
- random_state: null
- selection: cyclic
- tol: 0.0001
- verbose: false
- ml_m: !!python/tuple
- - Lasso
- - !!python/object:sklearn.linear_model._coordinate_descent.LassoCV
- _sklearn_version: 1.5.2
- alphas: null
- copy_X: true
- cv: null
- eps: 0.001
- fit_intercept: true
- max_iter: 1000
- n_alphas: 100
- n_jobs: null
- positive: false
- precompute: auto
- random_state: null
- selection: cyclic
- tol: 0.0001
- verbose: false
- - ml_g: !!python/tuple
- - Random Forest
- - !!python/object:sklearn.ensemble._forest.RandomForestRegressor
- _sklearn_version: 1.5.2
- bootstrap: true
- ccp_alpha: 0.0
- class_weight: null
- criterion: squared_error
- estimator: !!python/object:sklearn.tree._classes.DecisionTreeRegressor
- _sklearn_version: 1.5.2
- ccp_alpha: 0.0
- class_weight: null
- criterion: squared_error
- max_depth: null
- max_features: null
- max_leaf_nodes: null
- min_impurity_decrease: 0.0
- min_samples_leaf: 1
- min_samples_split: 2
- min_weight_fraction_leaf: 0.0
- monotonic_cst: null
- random_state: null
- splitter: best
- estimator_params: &id001 !!python/tuple
- - criterion
- - max_depth
- - min_samples_split
- - min_samples_leaf
- - min_weight_fraction_leaf
- - max_features
- - max_leaf_nodes
- - min_impurity_decrease
- - random_state
- - ccp_alpha
- - monotonic_cst
- max_depth: 5
- max_features: 10
- max_leaf_nodes: null
- max_samples: null
- min_impurity_decrease: 0.0
- min_samples_leaf: 20
- min_samples_split: 2
- min_weight_fraction_leaf: 0.0
- monotonic_cst: null
- n_estimators: 200
- n_jobs: null
- oob_score: false
- random_state: null
- verbose: 0
- warm_start: false
- ml_m: !!python/tuple
- - Random Forest
- - !!python/object:sklearn.ensemble._forest.RandomForestRegressor
- _sklearn_version: 1.5.2
- bootstrap: true
- ccp_alpha: 0.0
- class_weight: null
- criterion: squared_error
- estimator: !!python/object:sklearn.tree._classes.DecisionTreeRegressor
- _sklearn_version: 1.5.2
- ccp_alpha: 0.0
- class_weight: null
- criterion: squared_error
- max_depth: null
- max_features: null
- max_leaf_nodes: null
- min_impurity_decrease: 0.0
- min_samples_leaf: 1
- min_samples_split: 2
- min_weight_fraction_leaf: 0.0
- monotonic_cst: null
- random_state: null
- splitter: best
- estimator_params: *id001
- max_depth: 5
- max_features: 10
- max_leaf_nodes: null
- max_samples: null
- min_impurity_decrease: 0.0
- min_samples_leaf: 20
- min_samples_split: 2
- min_weight_fraction_leaf: 0.0
- monotonic_cst: null
+ n_obs:
+ - 500
+ dim_x:
+ - 20
+learner_definitions:
+ lasso: &id001
+ name: LassoCV
+ rf: &id002
+ name: RF Regr.
+ params:
n_estimators: 200
- n_jobs: null
- oob_score: false
- random_state: null
- verbose: 0
- warm_start: false
- - ml_g: !!python/tuple
- - Lasso
- - !!python/object:sklearn.linear_model._coordinate_descent.LassoCV
- _sklearn_version: 1.5.2
- alphas: null
- copy_X: true
- cv: null
- eps: 0.001
- fit_intercept: true
- max_iter: 1000
- n_alphas: 100
- n_jobs: null
- positive: false
- precompute: auto
- random_state: null
- selection: cyclic
- tol: 0.0001
- verbose: false
- ml_m: !!python/tuple
- - Random Forest
- - !!python/object:sklearn.ensemble._forest.RandomForestRegressor
- _sklearn_version: 1.5.2
- bootstrap: true
- ccp_alpha: 0.0
- class_weight: null
- criterion: squared_error
- estimator: !!python/object:sklearn.tree._classes.DecisionTreeRegressor
- _sklearn_version: 1.5.2
- ccp_alpha: 0.0
- class_weight: null
- criterion: squared_error
- max_depth: null
- max_features: null
- max_leaf_nodes: null
- min_impurity_decrease: 0.0
- min_samples_leaf: 1
- min_samples_split: 2
- min_weight_fraction_leaf: 0.0
- monotonic_cst: null
- random_state: null
- splitter: best
- estimator_params: *id001
- max_depth: 5
max_features: 10
- max_leaf_nodes: null
- max_samples: null
- min_impurity_decrease: 0.0
- min_samples_leaf: 20
- min_samples_split: 2
- min_weight_fraction_leaf: 0.0
- monotonic_cst: null
- n_estimators: 200
- n_jobs: null
- oob_score: false
- random_state: null
- verbose: 0
- warm_start: false
- - ml_g: !!python/tuple
- - Random Forest
- - !!python/object:sklearn.ensemble._forest.RandomForestRegressor
- _sklearn_version: 1.5.2
- bootstrap: true
- ccp_alpha: 0.0
- class_weight: null
- criterion: squared_error
- estimator: !!python/object:sklearn.tree._classes.DecisionTreeRegressor
- _sklearn_version: 1.5.2
- ccp_alpha: 0.0
- class_weight: null
- criterion: squared_error
- max_depth: null
- max_features: null
- max_leaf_nodes: null
- min_impurity_decrease: 0.0
- min_samples_leaf: 1
- min_samples_split: 2
- min_weight_fraction_leaf: 0.0
- monotonic_cst: null
- random_state: null
- splitter: best
- estimator_params: *id001
max_depth: 5
- max_features: 10
- max_leaf_nodes: null
- max_samples: null
- min_impurity_decrease: 0.0
min_samples_leaf: 20
- min_samples_split: 2
- min_weight_fraction_leaf: 0.0
- monotonic_cst: null
- n_estimators: 200
- n_jobs: null
- oob_score: false
- random_state: null
- verbose: 0
- warm_start: false
- ml_m: !!python/tuple
- - Lasso
- - !!python/object:sklearn.linear_model._coordinate_descent.LassoCV
- _sklearn_version: 1.5.2
- alphas: null
- copy_X: true
- cv: null
- eps: 0.001
- fit_intercept: true
- max_iter: 1000
- n_alphas: 100
- n_jobs: null
- positive: false
- precompute: auto
- random_state: null
- selection: cyclic
- tol: 0.0001
- verbose: false
- - ml_g: !!python/tuple
- - LGBM
- - !!python/object:lightgbm.sklearn.LGBMRegressor
- _Booster: null
- _best_iteration: -1
- _best_score: {}
- _class_map: null
- _class_weight: null
- _classes: null
- _evals_result: {}
- _n_classes: -1
- _n_features: -1
- _n_features_in: -1
- _objective: null
- _other_params:
- verbose: -1
- boosting_type: gbdt
- class_weight: null
- colsample_bytree: 1.0
- importance_type: split
- learning_rate: 0.01
- max_depth: -1
- min_child_samples: 20
- min_child_weight: 0.001
- min_split_gain: 0.0
+ lgbm: &id003
+ name: LGBM Regr.
+ params:
n_estimators: 500
- n_jobs: 1
- num_leaves: 31
- objective: null
- random_state: null
- reg_alpha: 0.0
- reg_lambda: 0.0
- subsample: 1.0
- subsample_for_bin: 200000
- subsample_freq: 0
- verbose: -1
- ml_m: !!python/tuple
- - LGBM
- - !!python/object:lightgbm.sklearn.LGBMRegressor
- _Booster: null
- _best_iteration: -1
- _best_score: {}
- _class_map: null
- _class_weight: null
- _classes: null
- _evals_result: {}
- _n_classes: -1
- _n_features: -1
- _n_features_in: -1
- _objective: null
- _other_params:
- verbose: -1
- boosting_type: gbdt
- class_weight: null
- colsample_bytree: 1.0
- importance_type: split
learning_rate: 0.01
- max_depth: -1
- min_child_samples: 20
- min_child_weight: 0.001
- min_split_gain: 0.0
- n_estimators: 500
- n_jobs: 1
- num_leaves: 31
- objective: null
- random_state: null
- reg_alpha: 0.0
- reg_lambda: 0.0
- subsample: 1.0
- subsample_for_bin: 200000
- subsample_freq: 0
- verbose: -1
- - ml_g: !!python/tuple
- - LGBM
- - !!python/object:lightgbm.sklearn.LGBMRegressor
- _Booster: null
- _best_iteration: -1
- _best_score: {}
- _class_map: null
- _class_weight: null
- _classes: null
- _evals_result: {}
- _n_classes: -1
- _n_features: -1
- _n_features_in: -1
- _objective: null
- _other_params:
- verbose: -1
- boosting_type: gbdt
- class_weight: null
- colsample_bytree: 1.0
- importance_type: split
- learning_rate: 0.01
- max_depth: -1
- min_child_samples: 20
- min_child_weight: 0.001
- min_split_gain: 0.0
- n_estimators: 500
- n_jobs: 1
- num_leaves: 31
- objective: null
- random_state: null
- reg_alpha: 0.0
- reg_lambda: 0.0
- subsample: 1.0
- subsample_for_bin: 200000
- subsample_freq: 0
- verbose: -1
- ml_m: !!python/tuple
- - Lasso
- - !!python/object:sklearn.linear_model._coordinate_descent.LassoCV
- _sklearn_version: 1.5.2
- alphas: null
- copy_X: true
- cv: null
- eps: 0.001
- fit_intercept: true
- max_iter: 1000
- n_alphas: 100
- n_jobs: null
- positive: false
- precompute: auto
- random_state: null
- selection: cyclic
- tol: 0.0001
- verbose: false
+dml_parameters:
+ learners:
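+  # 7 ml_g/ml_m pairings; RF Regr. and LGBM Regr. are each crossed with LassoCV but not with each other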
+ - ml_g: *id001
+ ml_m: *id001
+ - ml_g: *id002
+ ml_m: *id002
+ - ml_g: *id001
+ ml_m: *id002
+ - ml_g: *id002
+ ml_m: *id001
+ - ml_g: *id003
+ ml_m: *id003
+ - ml_g: *id003
+ ml_m: *id001
+ - ml_g: *id001
+ ml_m: *id003
score:
- partialling out
- IV-type
-simulation_parameters:
- max_runtime: 19800
- n_jobs: -2
- random_seed: 42
- repetitions: 1000
+confidence_parameters:
+ level:
+ - 0.95
+ - 0.9
diff --git a/results/plm/plr_ate_coverage.csv b/results/plm/plr_ate_coverage.csv
index db54406..751fcac 100644
--- a/results/plm/plr_ate_coverage.csv
+++ b/results/plm/plr_ate_coverage.csv
@@ -1,17 +1,29 @@
-Learner g,Learner m,score,level,Coverage,CI Length,Bias,repetition
-Lasso,Lasso,IV-type,0.9,0.881,0.1393979255576113,0.0352891099128789,1000
-Lasso,Lasso,IV-type,0.95,0.945,0.16610287331091153,0.0352891099128789,1000
-Lasso,Lasso,partialling out,0.9,0.908,0.14646362984437974,0.034686755904342816,1000
-Lasso,Lasso,partialling out,0.95,0.956,0.17452217926042807,0.034686755904342816,1000
-Lasso,Random Forest,IV-type,0.9,0.903,0.14665557372989238,0.03608539101919554,1000
-Lasso,Random Forest,IV-type,0.95,0.959,0.17475089450687506,0.03608539101919554,1000
-Lasso,Random Forest,partialling out,0.9,0.817,0.1432734763096462,0.04196371030845358,1000
-Lasso,Random Forest,partialling out,0.95,0.885,0.17072087686440923,0.04196371030845358,1000
-Random Forest,Lasso,IV-type,0.9,0.875,0.14194433574047324,0.0357528457566247,1000
-Random Forest,Lasso,IV-type,0.95,0.95,0.16913710819144942,0.0357528457566247,1000
-Random Forest,Lasso,partialling out,0.9,0.907,0.15192454689522888,0.036047683886664836,1000
-Random Forest,Lasso,partialling out,0.95,0.948,0.18102926327498683,0.036047683886664836,1000
-Random Forest,Random Forest,IV-type,0.9,0.897,0.14940035190827072,0.036842562593482744,1000
-Random Forest,Random Forest,IV-type,0.95,0.953,0.17802149943306753,0.036842562593482744,1000
-Random Forest,Random Forest,partialling out,0.9,0.878,0.14629982233202476,0.03724671506945692,1000
-Random Forest,Random Forest,partialling out,0.95,0.941,0.1743269905704728,0.03724671506945692,1000
+Learner g,Learner m,Score,level,Coverage,CI Length,Bias,repetition
+LGBM Regr.,LGBM Regr.,IV-type,0.9,0.885,0.15983740265821775,0.041136505215158464,1000
+LGBM Regr.,LGBM Regr.,IV-type,0.95,0.935,0.19045801246956545,0.041136505215158464,1000
+LGBM Regr.,LGBM Regr.,partialling out,0.9,0.824,0.14652019534658833,0.04246199234429185,1000
+LGBM Regr.,LGBM Regr.,partialling out,0.95,0.893,0.17458958121357435,0.04246199234429185,1000
+LGBM Regr.,LassoCV,IV-type,0.9,0.883,0.14837154235030955,0.037547727902696455,1000
+LGBM Regr.,LassoCV,IV-type,0.95,0.937,0.17679559723270477,0.037547727902696455,1000
+LGBM Regr.,LassoCV,partialling out,0.9,0.887,0.15933257799041745,0.04021756501464064,1000
+LGBM Regr.,LassoCV,partialling out,0.95,0.941,0.1898564767759428,0.04021756501464064,1000
+LassoCV,LGBM Regr.,IV-type,0.9,0.874,0.1504138026201527,0.0384034628252421,1000
+LassoCV,LGBM Regr.,IV-type,0.95,0.945,0.17922910043953308,0.0384034628252421,1000
+LassoCV,LGBM Regr.,partialling out,0.9,0.521,0.13901722228563204,0.06873936210074709,1000
+LassoCV,LGBM Regr.,partialling out,0.95,0.64,0.16564923738267465,0.06873936210074709,1000
+LassoCV,LassoCV,IV-type,0.9,0.876,0.13984950388376818,0.03654175128573881,1000
+LassoCV,LassoCV,IV-type,0.95,0.934,0.16664096207514204,0.03654175128573881,1000
+LassoCV,LassoCV,partialling out,0.9,0.9,0.1468437970720089,0.03588220373374918,1000
+LassoCV,LassoCV,partialling out,0.95,0.946,0.17497517645242536,0.03588220373374918,1000
+LassoCV,RF Regr.,IV-type,0.9,0.837,0.13013644240026234,0.036636608615855826,1000
+LassoCV,RF Regr.,IV-type,0.95,0.907,0.1550671354589842,0.036636608615855826,1000
+LassoCV,RF Regr.,partialling out,0.9,0.773,0.14296223702800953,0.046042984436838075,1000
+LassoCV,RF Regr.,partialling out,0.95,0.859,0.17035001238590083,0.046042984436838075,1000
+RF Regr.,LassoCV,IV-type,0.9,0.884,0.141016616428934,0.03611493633659719,1000
+RF Regr.,LassoCV,IV-type,0.95,0.929,0.168031662449296,0.03611493633659719,1000
+RF Regr.,LassoCV,partialling out,0.9,0.885,0.15062723475769935,0.037683080056869614,1000
+RF Regr.,LassoCV,partialling out,0.95,0.943,0.1794834205175513,0.037683080056869614,1000
+RF Regr.,RF Regr.,IV-type,0.9,0.841,0.1314513341669066,0.037780418069974564,1000
+RF Regr.,RF Regr.,IV-type,0.95,0.9,0.15663392563651957,0.037780418069974564,1000
+RF Regr.,RF Regr.,partialling out,0.9,0.876,0.14238380316163346,0.0364464310437898,1000
+RF Regr.,RF Regr.,partialling out,0.95,0.929,0.16966076592228904,0.0364464310437898,1000
diff --git a/results/plm/plr_ate_coverage_metadata.csv b/results/plm/plr_ate_coverage_metadata.csv
deleted file mode 100644
index f234205..0000000
--- a/results/plm/plr_ate_coverage_metadata.csv
+++ /dev/null
@@ -1,2 +0,0 @@
-DoubleML Version,Script,Date,Total Runtime (seconds),Python Version
-0.10.dev0,plr_ate_coverage.py,2025-05-22 13:19:23,6440.37045955658,3.12.3
diff --git a/results/plm/plr_ate_metadata.csv b/results/plm/plr_ate_metadata.csv
index 16eb5fb..c6aa9c1 100644
--- a/results/plm/plr_ate_metadata.csv
+++ b/results/plm/plr_ate_metadata.csv
@@ -1,2 +1,2 @@
DoubleML Version,Script,Date,Total Runtime (minutes),Python Version,Config File
-0.10.dev0,PLRATECoverageSimulation,2025-04-28 10:01,17.98821387688319,3.12.9,scripts/plm/plr_ate_config.yml
+0.11.dev0,PLRATECoverageSimulation,2025-06-05 15:50,194.21264092922212,3.12.3,scripts/plm/plr_ate_config.yml
diff --git a/results/plm/plr_ate_sensitivity.csv b/results/plm/plr_ate_sensitivity.csv
deleted file mode 100644
index a49f572..0000000
--- a/results/plm/plr_ate_sensitivity.csv
+++ /dev/null
@@ -1,17 +0,0 @@
-Learner g,Learner m,score,level,Coverage,CI Length,Bias,Coverage (Lower),Coverage (Upper),RV,RVa,Bias (Lower),Bias (Upper),repetition
-LGBM,LGBM,IV-type,0.9,0.49,1.2836841849471865,0.642852034975727,1.0,0.998,0.0883267598773138,0.025200860015212666,1.3451446703513257,0.27137365708627864,500
-LGBM,LGBM,IV-type,0.95,0.65,1.529604050351386,0.642852034975727,1.0,1.0,0.0883267598773138,0.013790413915027114,1.3451446703513257,0.27137365708627864,500
-LGBM,LGBM,partialling out,0.9,0.052,1.0555526522517718,0.9216896766797483,1.0,0.878,0.12290629723561408,0.0667859903596657,1.6463724503661545,0.2830593598175856,500
-LGBM,LGBM,partialling out,0.95,0.114,1.2577685626857555,0.9216896766797483,1.0,0.962,0.12290629723561408,0.05210975377693211,1.6463724503661545,0.2830593598175856,500
-LGBM,Random Forest,IV-type,0.9,0.078,1.1807040473318011,0.9305252037122888,1.0,0.948,0.11737861374914874,0.05859551246754723,1.6961397857763842,0.26681869054967733,500
-LGBM,Random Forest,IV-type,0.95,0.168,1.4068956478881136,0.9305252037122888,1.0,0.992,0.11737861374914874,0.04349989594031229,1.6961397857763842,0.26681869054967733,500
-LGBM,Random Forest,partialling out,0.9,0.084,1.242402642005516,0.994677943081587,1.0,0.926,0.11839290118360876,0.06017561051060185,1.8096093868200793,0.29091837395052383,500
-LGBM,Random Forest,partialling out,0.95,0.154,1.4804140579616833,0.994677943081587,1.0,0.978,0.11839290118360876,0.04521577389281383,1.8096093868200793,0.29091837395052383,500
-Random Forest,LGBM,IV-type,0.9,0.576,1.9010430543765071,0.8897616306557555,1.0,0.998,0.07213108029374843,0.01823318209099959,2.122769568430025,0.4620602752705221,500
-Random Forest,LGBM,IV-type,0.95,0.75,2.265232515882643,0.8897616306557555,1.0,1.0,0.07213108029374843,0.008535492348285852,2.122769568430025,0.4620602752705221,500
-Random Forest,LGBM,partialling out,0.9,0.002,1.5135381055089399,1.5758793346465376,1.0,0.828,0.12842168619163824,0.08071664417065783,2.7774438145541147,0.40467501778184206,500
-Random Forest,LGBM,partialling out,0.95,0.008,1.803491889746142,1.5758793346465376,1.0,0.942,0.12842168619163824,0.06722056399959782,2.7774438145541147,0.40467501778184206,500
-Random Forest,Random Forest,IV-type,0.9,0.01,1.7160774922768312,1.6169678600750872,1.0,0.896,0.11938462955718954,0.07029811586923293,2.94609158848587,0.39721850987868246,500
-Random Forest,Random Forest,IV-type,0.95,0.038,2.0448324546519867,1.6169678600750872,1.0,0.972,0.11938462955718954,0.056429904585180324,2.94609158848587,0.39721850987868246,500
-Random Forest,Random Forest,partialling out,0.9,0.0,1.738561053384026,1.7359527505574848,1.0,0.806,0.1280019015187021,0.07819909367227408,3.0609083438237703,0.46246741584476964,500
-Random Forest,Random Forest,partialling out,0.95,0.016,2.0716232701338355,1.7359527505574848,1.0,0.94,0.1280019015187021,0.06411936004235119,3.0609083438237703,0.46246741584476964,500
diff --git a/results/plm/plr_ate_sensitivity_config.yml b/results/plm/plr_ate_sensitivity_config.yml
new file mode 100644
index 0000000..f575860
--- /dev/null
+++ b/results/plm/plr_ate_sensitivity_config.yml
@@ -0,0 +1,52 @@
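+# Settings for the PLR ATE sensitivity simulation: n_obs = 1000, with the
+# same seven ml_g/ml_m pairings as plr_ate_config.yml but slightly different
+# RF and LGBM hyperparameters.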
+simulation_parameters:
+ repetitions: 1000
+ max_runtime: 19800
+ random_seed: 42
+ n_jobs: -2
+dgp_parameters:
+ theta:
+ - 0.5
+ n_obs:
+ - 1000
+learner_definitions:
+ lasso: &id001
+ name: LassoCV
+ rf: &id002
+ name: RF Regr.
+ params:
+ n_estimators: 200
+ max_features: 10
+ max_depth: 5
+ min_samples_leaf: 2
+ lgbm: &id003
+ name: LGBM Regr.
+ params:
+ n_estimators: 500
+ learning_rate: 0.05
+ min_child_samples: 5
+dml_parameters:
+ learners:
+ - ml_g: *id001
+ ml_m: *id001
+ - ml_g: *id002
+ ml_m: *id002
+ - ml_g: *id001
+ ml_m: *id002
+ - ml_g: *id002
+ ml_m: *id001
+ - ml_g: *id003
+ ml_m: *id003
+ - ml_g: *id003
+ ml_m: *id001
+ - ml_g: *id001
+ ml_m: *id003
+ score:
+ - partialling out
+ - IV-type
+confidence_parameters:
+ level:
+ - 0.95
+ - 0.9
diff --git a/results/plm/plr_ate_sensitivity_coverage.csv b/results/plm/plr_ate_sensitivity_coverage.csv
new file mode 100644
index 0000000..cd2031e
--- /dev/null
+++ b/results/plm/plr_ate_sensitivity_coverage.csv
@@ -0,0 +1,29 @@
+Learner g,Learner m,Score,level,Coverage,CI Length,Bias,Coverage (Lower),Coverage (Upper),RV,RVa,Bias (Lower),Bias (Upper),repetition
+LGBM Regr.,LGBM Regr.,IV-type,0.9,0.388,1.4093565939481743,0.7570214454341684,1.0,0.992,0.10345660369135473,0.03229286770316017,1.4538012313284263,0.2824074286090704,1000
+LGBM Regr.,LGBM Regr.,IV-type,0.95,0.577,1.6793519619323294,0.7570214454341684,1.0,1.0,0.10345660369135473,0.018230572464796476,1.4538012313284263,0.2824074286090704,1000
+LGBM Regr.,LGBM Regr.,partialling out,0.9,0.205,1.10817064601718,0.74843530520864,1.0,0.973,0.10230834250567276,0.04378686178903142,1.4469171977836348,0.26847190174256996,1000
+LGBM Regr.,LGBM Regr.,partialling out,0.95,0.328,1.3204667694010184,0.74843530520864,1.0,0.995,0.10230834250567276,0.02998497094011609,1.4469171977836348,0.26847190174256996,1000
+LGBM Regr.,LassoCV,IV-type,0.9,0.02,1.521119145022205,1.4608769656478595,1.0,0.359,0.1858831134654844,0.10989835156011962,2.1959840483158897,0.7297672680739162,1000
+LGBM Regr.,LassoCV,IV-type,0.95,0.045,1.8125252554924385,1.4608769656478595,1.0,0.58,0.1858831134654844,0.08960405454511797,2.1959840483158897,0.7297672680739162,1000
+LGBM Regr.,LassoCV,partialling out,0.9,0.021,1.5093401960317487,1.3250609990655347,1.0,0.537,0.17222024016738868,0.09586170007661886,2.053549895430398,0.6003221955363113,1000
+LGBM Regr.,LassoCV,partialling out,0.95,0.079,1.7984897720799624,1.3250609990655347,1.0,0.765,0.17222024016738868,0.07549935380647549,2.053549895430398,0.6003221955363113,1000
+LassoCV,LGBM Regr.,IV-type,0.9,0.748,2.5133664291353095,1.0223715401643154,1.0,1.0,0.06817434769512887,0.010427835528562208,2.534255845067475,0.5860957184805163,1000
+LassoCV,LGBM Regr.,IV-type,0.95,0.926,2.9948608194317967,1.0223715401643154,1.0,1.0,0.06817434769512887,0.0031611418652559196,2.534255845067475,0.5860957184805163,1000
+LassoCV,LGBM Regr.,partialling out,0.9,0.605,1.9815232621302428,0.9121069506452848,1.0,1.0,0.06053882318785803,0.012365997991750076,2.4395495885220395,0.6451231506011218,1000
+LassoCV,LGBM Regr.,partialling out,0.95,0.833,2.361130598290116,0.9121069506452848,1.0,1.0,0.06053882318785803,0.004652476916621015,2.4395495885220395,0.6451231506011218,1000
+LassoCV,LassoCV,IV-type,0.9,0.0,2.5877087930438964,4.872772215540649,1.0,0.0,0.28270722007024496,0.22413939259803953,6.407549231579281,3.3379951995020174,1000
+LassoCV,LassoCV,IV-type,0.95,0.001,3.0834452097987697,4.872772215540649,1.0,0.001,0.28270722007024496,0.20760827505991444,6.407549231579281,3.3379951995020174,1000
+LassoCV,LassoCV,partialling out,0.9,0.0,2.6022738054015564,4.872971572776508,1.0,0.0,0.2826615836284171,0.22383756375128439,6.408028359909112,3.337914785643903,1000
+LassoCV,LassoCV,partialling out,0.95,0.001,3.1008004924741663,4.872971572776508,1.0,0.001,0.2826615836284171,0.2072284307839625,6.408028359909112,3.337914785643903,1000
+LassoCV,RF Regr.,IV-type,0.9,0.03,2.230177042929521,1.7208906933423658,1.0,0.996,0.10321605320265151,0.050929864370534886,3.379395087094497,0.32699487018563966,1000
+LassoCV,RF Regr.,IV-type,0.95,0.104,2.6574198528480117,1.7208906933423658,1.0,0.999,0.10321605320265151,0.036611667122436437,3.379395087094497,0.32699487018563966,1000
+LassoCV,RF Regr.,partialling out,0.9,0.035,2.2613273518524517,1.6654740653451128,1.0,1.0,0.09838693440861622,0.046224192363929564,3.352306820686065,0.30396867960100216,1000
+LassoCV,RF Regr.,partialling out,0.95,0.126,2.6945377353123594,1.6654740653451128,1.0,1.0,0.09838693440861622,0.031946038970425784,3.352306820686065,0.30396867960100216,1000
+RF Regr.,LassoCV,IV-type,0.9,0.001,1.9755980491263099,2.4911901944187225,1.0,0.146,0.18765120739490848,0.13125708709313408,3.74493409869544,1.2384844937405288,1000
+RF Regr.,LassoCV,IV-type,0.95,0.004,2.354070271524165,2.4911901944187225,1.0,0.305,0.18765120739490848,0.11522730290243077,3.74493409869544,1.2384844937405288,1000
+RF Regr.,LassoCV,partialling out,0.9,0.003,1.9489582706745046,2.190059714864294,1.0,0.342,0.16663434910102978,0.11054051768693487,3.4484420814862635,0.9343987618760414,1000
+RF Regr.,LassoCV,partialling out,0.95,0.006,2.3223270176162565,2.190059714864294,1.0,0.58,0.16663434910102978,0.09457408564648179,3.4484420814862635,0.9343987618760414,1000
+RF Regr.,RF Regr.,IV-type,0.9,0.016,1.7671562935719778,1.6053531172501359,1.0,0.908,0.11827277682192193,0.06796316869150489,2.9370932976098705,0.3875377103244566,1000
+RF Regr.,RF Regr.,IV-type,0.95,0.047,2.1056966004164406,1.6053531172501359,1.0,0.972,0.11827277682192193,0.05390037915368153,2.9370932976098705,0.3875377103244566,1000
+RF Regr.,RF Regr.,partialling out,0.9,0.016,1.7741971479960048,1.5898420246719582,1.0,0.93,0.11671620294392733,0.06643566871448575,2.9271556769700973,0.3806480611364335,1000
+RF Regr.,RF Regr.,partialling out,0.95,0.057,2.114086295928167,1.5898420246719582,1.0,0.98,0.11671620294392733,0.052344557926308814,2.9271556769700973,0.3806480611364335,1000
diff --git a/results/plm/plr_ate_sensitivity_metadata.csv b/results/plm/plr_ate_sensitivity_metadata.csv
index fa762a2..24030fe 100644
--- a/results/plm/plr_ate_sensitivity_metadata.csv
+++ b/results/plm/plr_ate_sensitivity_metadata.csv
@@ -1,2 +1,2 @@
-DoubleML Version,Script,Date,Total Runtime (seconds),Python Version
-0.10.dev0,plr_ate_sensitivity.py,2025-05-22 14:16:38,9875.844212293625,3.12.3
+DoubleML Version,Script,Date,Total Runtime (minutes),Python Version,Config File
+0.11.dev0,PLRATESensitivityCoverageSimulation,2025-06-05 16:23,227.22630832592645,3.12.3,scripts/plm/plr_ate_sensitivity_config.yml
diff --git a/results/plm/plr_cate_config.yml b/results/plm/plr_cate_config.yml
new file mode 100644
index 0000000..20ce744
--- /dev/null
+++ b/results/plm/plr_cate_config.yml
@@ -0,0 +1,55 @@
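+# Settings for the PLR CATE coverage simulation: 500 observations with p = 10
+# covariates, support_size = 5 (presumably the number of covariates active in
+# the DGP) and n_x = 1 (presumably the dimension of the effect modifier).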
+simulation_parameters:
+ repetitions: 1000
+ max_runtime: 19800
+ random_seed: 42
+ n_jobs: -2
+dgp_parameters:
+ n_obs:
+ - 500
+ p:
+ - 10
+ support_size:
+ - 5
+ n_x:
+ - 1
+learner_definitions:
+ lasso: &id001
+ name: LassoCV
+ rf: &id002
+ name: RF Regr.
+ params:
+ n_estimators: 200
+ max_features: 10
+ max_depth: 5
+ min_samples_leaf: 2
+ lgbm: &id003
+ name: LGBM Regr.
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+dml_parameters:
+ learners:
+ - ml_g: *id001
+ ml_m: *id001
+ - ml_g: *id002
+ ml_m: *id002
+ - ml_g: *id001
+ ml_m: *id002
+ - ml_g: *id002
+ ml_m: *id001
+ - ml_g: *id003
+ ml_m: *id003
+ - ml_g: *id003
+ ml_m: *id001
+ - ml_g: *id001
+ ml_m: *id003
+ score:
+ - partialling out
+ - IV-type
+confidence_parameters:
+ level:
+ - 0.95
+ - 0.9
diff --git a/results/plm/plr_cate_coverage.csv b/results/plm/plr_cate_coverage.csv
index 7ac96f5..c95af2f 100644
--- a/results/plm/plr_cate_coverage.csv
+++ b/results/plm/plr_cate_coverage.csv
@@ -1,9 +1,29 @@
-Learner g,Learner m,level,Coverage,CI Length,Bias,Uniform Coverage,Uniform CI Length,repetition
-LGBM,LGBM,0.9,0.548837,0.24645987587973475,0.11702632069330975,0.838,0.6259964209606882,1000
-LGBM,LGBM,0.95,0.6519645000000001,0.29367505560587087,0.11702632069330975,0.831,0.6258576065111882,1000
-LGBM,Lasso,0.9,0.8638669999999999,0.33911313826395484,0.09124937236613799,0.996,0.8623254691407899,1000
-LGBM,Lasso,0.95,0.9212285,0.40407822726059023,0.09124937236613799,0.995,0.8610181300321831,1000
-Lasso,LGBM,0.9,0.0416435,0.3017482613643098,0.4961073439476889,0.0,0.7663971451030848,1000
-Lasso,LGBM,0.95,0.053314,0.359555230314165,0.4961073439476889,0.0,0.7663491956301145,1000
-Lasso,Lasso,0.9,0.891967,0.20515299517628052,0.050477626432069274,0.999,0.520241144704702,1000
-Lasso,Lasso,0.95,0.944044,0.24445487141081157,0.050477626432069274,0.998,0.5231019747390456,1000
+Learner g,Learner m,Score,level,Coverage,CI Length,Bias,Uniform Coverage,Uniform CI Length,repetition
+LGBM Regr.,LGBM Regr.,IV-type,0.9,0.81092,0.34748895671048663,0.10460813293283802,0.981,0.8749566905579788,1000
+LGBM Regr.,LGBM Regr.,IV-type,0.95,0.87944,0.4140586305179147,0.10460813293283802,0.976,0.8752701592984206,1000
+LGBM Regr.,LGBM Regr.,partialling out,0.9,0.74924,0.45498586012385417,0.15429490050948075,0.974,1.1431727401739051,1000
+LGBM Regr.,LGBM Regr.,partialling out,0.95,0.83409,0.5421490913878395,0.15429490050948075,0.979,1.1448962771888775,1000
+LGBM Regr.,LassoCV,IV-type,0.9,0.88,0.36554268929918754,0.09244785218836214,0.998,0.9180115191244616,1000
+LGBM Regr.,LassoCV,IV-type,0.95,0.9358200000000001,0.43557097975105097,0.09244785218836214,1.0,0.922278240560451,1000
+LGBM Regr.,LassoCV,partialling out,0.9,0.8425499999999999,0.6463383782822439,0.17877605236642957,0.993,1.6252735422347984,1000
+LGBM Regr.,LassoCV,partialling out,0.95,0.90764,0.7701596801698865,0.17877605236642957,0.99,1.6258971712277042,1000
+LassoCV,LGBM Regr.,IV-type,0.9,0.77804,0.356611018165839,0.11531004698375871,0.98,0.8990196528636599,1000
+LassoCV,LGBM Regr.,IV-type,0.95,0.85509,0.42492823716515665,0.11531004698375871,0.973,0.897125005016581,1000
+LassoCV,LGBM Regr.,partialling out,0.9,0.11495,0.5626129349999418,0.5271527276193878,0.232,1.4094053511501499,1000
+LassoCV,LGBM Regr.,partialling out,0.95,0.17364000000000002,0.6703946611225079,0.5271527276193878,0.245,1.415448802418257,1000
+LassoCV,LassoCV,IV-type,0.9,0.8912100000000001,0.36244677298144845,0.08838657089890865,0.999,0.913576206576413,1000
+LassoCV,LassoCV,IV-type,0.95,0.94274,0.43188196792501726,0.08838657089890865,0.998,0.912240620292473,1000
+LassoCV,LassoCV,partialling out,0.9,0.88858,0.3775763713434813,0.09330414285601538,0.997,0.9491441464568747,1000
+LassoCV,LassoCV,partialling out,0.95,0.94064,0.4499099963187045,0.09330414285601538,0.997,0.9487107676660669,1000
+LassoCV,RF Regr.,IV-type,0.9,0.89254,0.3599305351929274,0.08850837997692952,1.0,0.9044665568509359,1000
+LassoCV,RF Regr.,IV-type,0.95,0.94188,0.4288836856698476,0.08850837997692952,0.999,0.9044733402499352,1000
+LassoCV,RF Regr.,partialling out,0.9,0.77416,0.43217879712767876,0.1405937817588716,0.981,1.090157100332438,1000
+LassoCV,RF Regr.,partialling out,0.95,0.85737,0.5149727996295947,0.1405937817588716,0.978,1.087530105804212,1000
+RF Regr.,LassoCV,IV-type,0.9,0.8807699999999999,0.3475079221052468,0.08785236408566467,0.996,0.8749646348632354,1000
+RF Regr.,LassoCV,IV-type,0.95,0.93665,0.4140812291796275,0.08785236408566467,0.998,0.8759773436970753,1000
+RF Regr.,LassoCV,partialling out,0.9,0.8651,0.44409447433118815,0.11793812231956644,0.995,1.113461220582107,1000
+RF Regr.,LassoCV,partialling out,0.95,0.9245099999999999,0.5291712047567231,0.11793812231956644,0.995,1.1193035806604223,1000
+RF Regr.,RF Regr.,IV-type,0.9,0.8769600000000001,0.3430202960561061,0.08782119424850063,0.997,0.8607856773588846,1000
+RF Regr.,RF Regr.,IV-type,0.95,0.9322,0.4087338929253366,0.08782119424850063,0.998,0.8634371330487173,1000
+RF Regr.,RF Regr.,partialling out,0.9,0.88322,0.3831582275710224,0.09673737638816682,0.996,0.9645065365685301,1000
+RF Regr.,RF Regr.,partialling out,0.95,0.9354,0.45656118825068054,0.09673737638816682,0.998,0.9640177197875869,1000
diff --git a/results/plm/plr_cate_coverage_metadata.csv b/results/plm/plr_cate_coverage_metadata.csv
deleted file mode 100644
index fe8d34a..0000000
--- a/results/plm/plr_cate_coverage_metadata.csv
+++ /dev/null
@@ -1,2 +0,0 @@
-DoubleML Version,Script,Date,Total Runtime (seconds),Python Version
-0.10.dev0,plr_cate_coverage.py,2025-05-22 12:47:29,4525.569060087204,3.12.3
diff --git a/results/plm/plr_cate_metadata.csv b/results/plm/plr_cate_metadata.csv
new file mode 100644
index 0000000..be41517
--- /dev/null
+++ b/results/plm/plr_cate_metadata.csv
@@ -0,0 +1,2 @@
+DoubleML Version,Script,Date,Total Runtime (minutes),Python Version,Config File
+0.11.dev0,PLRCATECoverageSimulation,2025-06-05 15:41,185.28740434646608,3.12.3,scripts/plm/plr_cate_config.yml
diff --git a/results/plm/plr_gate_config.yml b/results/plm/plr_gate_config.yml
new file mode 100644
index 0000000..20ce744
--- /dev/null
+++ b/results/plm/plr_gate_config.yml
@@ -0,0 +1,54 @@
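+# Settings for the PLR GATE coverage simulation; identical to
+# plr_cate_config.yml (same DGP sizes and learner grid).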
+simulation_parameters:
+ repetitions: 1000
+ max_runtime: 19800
+ random_seed: 42
+ n_jobs: -2
+dgp_parameters:
+ n_obs:
+ - 500
+ p:
+ - 10
+ support_size:
+ - 5
+ n_x:
+ - 1
+learner_definitions:
+ lasso: &id001
+ name: LassoCV
+ rf: &id002
+ name: RF Regr.
+ params:
+ n_estimators: 200
+ max_features: 10
+ max_depth: 5
+ min_samples_leaf: 2
+ lgbm: &id003
+ name: LGBM Regr.
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+dml_parameters:
+ learners:
+ - ml_g: *id001
+ ml_m: *id001
+ - ml_g: *id002
+ ml_m: *id002
+ - ml_g: *id001
+ ml_m: *id002
+ - ml_g: *id002
+ ml_m: *id001
+ - ml_g: *id003
+ ml_m: *id003
+ - ml_g: *id003
+ ml_m: *id001
+ - ml_g: *id001
+ ml_m: *id003
+ score:
+ - partialling out
+ - IV-type
+confidence_parameters:
+ level:
+ - 0.95
+ - 0.9
diff --git a/results/plm/plr_gate_coverage.csv b/results/plm/plr_gate_coverage.csv
index de92fa2..df0c44b 100644
--- a/results/plm/plr_gate_coverage.csv
+++ b/results/plm/plr_gate_coverage.csv
@@ -1,9 +1,29 @@
-Learner g,Learner m,level,Coverage,CI Length,Bias,Uniform Coverage,Uniform CI Length,repetition
-LGBM,LGBM,0.9,0.694,0.42514371296714487,0.1552302674837282,0.953,0.9991019141230857,1000
-LGBM,LGBM,0.95,0.775,0.5065899798109038,0.1552302674837282,0.955,0.9994456036482492,1000
-LGBM,Lasso,0.9,0.8586666666666666,0.6309690380685457,0.1706803130476304,1.0,1.4834269690149875,1000
-LGBM,Lasso,0.95,0.9153333333333333,0.7518459817401837,0.1706803130476304,0.998,1.4857363367985323,1000
-Lasso,LGBM,0.9,0.033333333333333326,0.5039828586868691,0.6915874651279188,0.009,1.1844079148312503,1000
-Lasso,LGBM,0.95,0.056666666666666664,0.6005326161954867,0.6915874651279188,0.013,1.1844261861006267,1000
-Lasso,Lasso,0.9,0.9116666666666666,0.3672698862053673,0.08452872727745843,0.999,0.861811460445299,1000
-Lasso,Lasso,0.95,0.9576666666666667,0.43762906180458633,0.08452872727745843,1.0,0.8638737943195437,1000
+Learner g,Learner m,Score,level,Coverage,CI Length,Bias,Uniform Coverage,Uniform CI Length,repetition
+LGBM Regr.,LGBM Regr.,IV-type,0.9,0.8023333333333333,0.340614107591605,0.10669475143783842,0.987,0.7987268267983838,1000
+LGBM Regr.,LGBM Regr.,IV-type,0.95,0.8706666666666666,0.40586674252777905,0.10669475143783842,0.988,0.7989724883932184,1000
+LGBM Regr.,LGBM Regr.,partialling out,0.9,0.725,0.41206326461441856,0.14116055364172336,0.982,0.9688824136316435,1000
+LGBM Regr.,LGBM Regr.,partialling out,0.95,0.816,0.4910036642549748,0.14116055364172336,0.978,0.963097819307053,1000
+LGBM Regr.,LassoCV,IV-type,0.9,0.884,0.3584385924494663,0.09037899338673383,0.999,0.8422006093284867,1000
+LGBM Regr.,LassoCV,IV-type,0.95,0.94,0.4271059262411261,0.09037899338673383,0.999,0.8410103142728071,1000
+LGBM Regr.,LassoCV,partialling out,0.9,0.846,0.5546020564560807,0.15058630900692344,0.995,1.3030240395599795,1000
+LGBM Regr.,LassoCV,partialling out,0.95,0.9063333333333333,0.6608491105803649,0.15058630900692344,0.998,1.3052579182429735,1000
+LassoCV,LGBM Regr.,IV-type,0.9,0.7443333333333334,0.3533683685919372,0.12308426099091321,0.984,0.8290587683546056,1000
+LassoCV,LGBM Regr.,IV-type,0.95,0.828,0.4210643818802881,0.12308426099091321,0.986,0.8297177472370508,1000
+LassoCV,LGBM Regr.,partialling out,0.9,0.12766666666666665,0.4805861056396863,0.48492787025671996,0.166,1.1277373422329622,1000
+LassoCV,LGBM Regr.,partialling out,0.95,0.18,0.5726536654023722,0.48492787025671996,0.163,1.1286363645960298,1000
+LassoCV,LassoCV,IV-type,0.9,0.908,0.35675825943241785,0.08468553801157398,0.998,0.8347654007058711,1000
+LassoCV,LassoCV,IV-type,0.95,0.9536666666666667,0.42510368595573833,0.08468553801157398,1.0,0.8406082357109622,1000
+LassoCV,LassoCV,partialling out,0.9,0.897,0.3685816198393858,0.08926222502259333,0.999,0.8634981533000406,1000
+LassoCV,LassoCV,partialling out,0.95,0.9493333333333334,0.43919208883499217,0.08926222502259333,0.998,0.8629765470291304,1000
+LassoCV,RF Regr.,IV-type,0.9,0.9046666666666666,0.35535128701248625,0.08564429580896525,0.998,0.8309506852608253,1000
+LassoCV,RF Regr.,IV-type,0.95,0.9526666666666667,0.423427174912371,0.08564429580896525,0.997,0.8339321583590988,1000
+LassoCV,RF Regr.,partialling out,0.9,0.7333333333333334,0.4028059779258174,0.13583622582602936,0.98,0.9463483076426671,1000
+LassoCV,RF Regr.,partialling out,0.95,0.8286666666666667,0.4799729268039788,0.13583622582602936,0.988,0.9464873702847479,1000
+RF Regr.,LassoCV,IV-type,0.9,0.8856666666666666,0.34695511339781726,0.0872292667085423,0.999,0.8159480135750993,1000
+RF Regr.,LassoCV,IV-type,0.95,0.936,0.41342251697621396,0.0872292667085423,0.998,0.8161295955897384,1000
+RF Regr.,LassoCV,partialling out,0.9,0.86,0.4138648906001244,0.108596480808861,0.999,0.97698823470601,1000
+RF Regr.,LassoCV,partialling out,0.95,0.9236666666666666,0.4931504340269086,0.108596480808861,0.999,0.9729165052534585,1000
+RF Regr.,RF Regr.,IV-type,0.9,0.8836666666666666,0.34359869293370354,0.08702820649024633,1.0,0.8068440871102968,1000
+RF Regr.,RF Regr.,IV-type,0.95,0.9393333333333334,0.4094230953141002,0.08702820649024633,0.998,0.8071122999062327,1000
+RF Regr.,RF Regr.,partialling out,0.9,0.8806666666666666,0.3685754520029418,0.09347367483176777,1.0,0.8690762276450229,1000
+RF Regr.,RF Regr.,partialling out,0.95,0.9383333333333334,0.4391847394045658,0.09347367483176777,1.0,0.8688884478341746,1000
diff --git a/results/plm/plr_gate_coverage_metadata.csv b/results/plm/plr_gate_coverage_metadata.csv
deleted file mode 100644
index 334d6ba..0000000
--- a/results/plm/plr_gate_coverage_metadata.csv
+++ /dev/null
@@ -1,2 +0,0 @@
-DoubleML Version,Script,Date,Total Runtime (seconds),Python Version
-0.10.dev0,plr_gate_coverage.py,2025-05-22 11:58:55,1613.3821394443512,3.12.3
diff --git a/results/plm/plr_gate_metadata.csv b/results/plm/plr_gate_metadata.csv
new file mode 100644
index 0000000..c820b6d
--- /dev/null
+++ b/results/plm/plr_gate_metadata.csv
@@ -0,0 +1,2 @@
+DoubleML Version,Script,Date,Total Runtime (minutes),Python Version,Config File
+0.11.dev0,PLRGATECoverageSimulation,2025-06-05 15:40,184.26536533435186,3.12.3,scripts/plm/plr_gate_config.yml
diff --git a/results/rdd/rdd_fuzzy_config.yml b/results/rdd/rdd_fuzzy_config.yml
new file mode 100644
index 0000000..1c010bd
--- /dev/null
+++ b/results/rdd/rdd_fuzzy_config.yml
@@ -0,0 +1,66 @@
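+# Fuzzy RDD coverage settings (fuzzy: true): treatment take-up is stochastic,
+# so every outcome learner (ml_g) is paired with a matching classifier (ml_m).
+# fs_specification presumably selects the first-stage specification.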
+simulation_parameters:
+ repetitions: 1000
+ max_runtime: 19800
+ random_seed: 42
+ n_jobs: -2
+dgp_parameters:
+ n_obs:
+ - 2000
+ fuzzy:
+ - true
+ cutoff:
+ - 0.0
+learner_definitions:
+ lgbmr: &id001
+ name: LGBM Regr.
+ params:
+ n_estimators: 200
+ learning_rate: 0.02
+ max_depth: 5
+ lgbmc: &id002
+ name: LGBM Clas.
+ params:
+ n_estimators: 200
+ learning_rate: 0.02
+ max_depth: 5
+ global_linear: &id003
+ name: Global Linear
+ global_logistic: &id004
+ name: Global Logistic
+ local_linear: &id005
+ name: Linear
+ local_logistic: &id006
+ name: Logistic
+ stacked_reg: &id007
+ name: Stacked Regr.
+ params:
+ n_estimators: 200
+ learning_rate: 0.02
+ max_depth: 5
+ stacked_cls: &id008
+ name: Stacked Clas.
+ params:
+ n_estimators: 200
+ learning_rate: 0.02
+ max_depth: 5
+dml_parameters:
+ fs_specification:
+ - cutoff
+ - cutoff and score
+ - interacted cutoff and score
+ learners:
+ - ml_g: *id001
+ ml_m: *id002
+ - ml_g: *id003
+ ml_m: *id004
+ - ml_g: *id005
+ ml_m: *id006
+ - ml_g: *id007
+ ml_m: *id008
+confidence_parameters:
+ level:
+ - 0.95
+ - 0.9
diff --git a/results/rdd/rdd_fuzzy_coverage.csv b/results/rdd/rdd_fuzzy_coverage.csv
index ebec18a..a6c0a42 100644
--- a/results/rdd/rdd_fuzzy_coverage.csv
+++ b/results/rdd/rdd_fuzzy_coverage.csv
@@ -1,99 +1,27 @@
-Method,fs specification,Learner g,Learner m,level,Coverage,CI Length,Bias,repetition
-rdflex,cutoff,Global linear,Global linear,0.9,0.928,10.556606789284906,2.56683549934152,250
-rdflex,cutoff,Global linear,Global linear,0.95,0.964,12.578972844104541,2.56683549934152,250
-rdflex,cutoff,Global linear,LGBM,0.9,0.932,10.775966548524359,2.6575671123131777,250
-rdflex,cutoff,Global linear,LGBM,0.95,0.976,12.840356119018518,2.6575671123131777,250
-rdflex,cutoff,Global linear,Linear,0.9,0.928,10.680654195730742,2.6095109173162823,250
-rdflex,cutoff,Global linear,Linear,0.95,0.964,12.726784445711942,2.6095109173162823,250
-rdflex,cutoff,Global linear,Stacked,0.9,0.932,10.571620600845328,2.6448751082127377,250
-rdflex,cutoff,Global linear,Stacked,0.95,0.96,12.596862904014394,2.6448751082127377,250
-rdflex,cutoff,LGBM,Global linear,0.9,0.94,2.038612360942403,0.46302745914972104,250
-rdflex,cutoff,LGBM,Global linear,0.95,0.968,2.4291564552711176,0.46302745914972104,250
-rdflex,cutoff,LGBM,LGBM,0.9,0.94,2.0417477871653618,0.4912050569704094,250
-rdflex,cutoff,LGBM,LGBM,0.95,0.976,2.4328925460529893,0.4912050569704094,250
-rdflex,cutoff,LGBM,Linear,0.9,0.96,2.088603350877394,0.47982216259182514,250
-rdflex,cutoff,LGBM,Linear,0.95,0.992,2.488724393851574,0.47982216259182514,250
-rdflex,cutoff,LGBM,Stacked,0.9,0.916,2.0184810970572267,0.46215722170125356,250
-rdflex,cutoff,LGBM,Stacked,0.95,0.972,2.405168574810687,0.46215722170125356,250
-rdflex,cutoff,Linear,Global linear,0.9,0.928,10.564695732971051,2.553945752977564,250
-rdflex,cutoff,Linear,Global linear,0.95,0.968,12.58861141500109,2.553945752977564,250
-rdflex,cutoff,Linear,LGBM,0.9,0.928,10.777243710702589,2.650598511347432,250
-rdflex,cutoff,Linear,LGBM,0.95,0.972,12.841877951618534,2.650598511347432,250
-rdflex,cutoff,Linear,Linear,0.9,0.92,10.74105034617443,2.6072040698667376,250
-rdflex,cutoff,Linear,Linear,0.95,0.968,12.798750897762686,2.6072040698667376,250
-rdflex,cutoff,Linear,Stacked,0.9,0.932,10.404127851046006,2.505787762202565,250
-rdflex,cutoff,Linear,Stacked,0.95,0.968,12.397282982798743,2.505787762202565,250
-rdflex,cutoff,Stacked,Global linear,0.9,0.928,2.100685938954007,0.4945555538677959,250
-rdflex,cutoff,Stacked,Global linear,0.95,0.972,2.50312168555107,0.4945555538677959,250
-rdflex,cutoff,Stacked,LGBM,0.9,0.956,2.003482953646103,0.4641511546138578,250
-rdflex,cutoff,Stacked,LGBM,0.95,0.992,2.3872971846522506,0.4641511546138578,250
-rdflex,cutoff,Stacked,Linear,0.9,0.944,2.11736656727605,0.46071259221608185,250
-rdflex,cutoff,Stacked,Linear,0.95,0.984,2.522997880134595,0.46071259221608185,250
-rdflex,cutoff,Stacked,Stacked,0.9,0.936,2.0245073903258874,0.44210382922098473,250
-rdflex,cutoff,Stacked,Stacked,0.95,0.976,2.412349346140925,0.44210382922098473,250
-rdflex,cutoff and score,Global linear,Global linear,0.9,0.928,10.544420005750476,2.567803474303041,250
-rdflex,cutoff and score,Global linear,Global linear,0.95,0.964,12.564451395859244,2.567803474303041,250
-rdflex,cutoff and score,Global linear,LGBM,0.9,0.928,10.805365747954351,2.6437437844814258,250
-rdflex,cutoff and score,Global linear,LGBM,0.95,0.972,12.875387425824758,2.6437437844814258,250
-rdflex,cutoff and score,Global linear,Linear,0.9,0.928,10.699263774918355,2.5999147658504103,250
-rdflex,cutoff and score,Global linear,Linear,0.95,0.964,12.748959127019463,2.5999147658504103,250
-rdflex,cutoff and score,Global linear,Stacked,0.9,0.932,10.82713502969946,2.6270982998447745,250
-rdflex,cutoff and score,Global linear,Stacked,0.95,0.972,12.901327124950933,2.6270982998447745,250
-rdflex,cutoff and score,LGBM,Global linear,0.9,0.956,2.1656416896389774,0.4963137476876054,250
-rdflex,cutoff and score,LGBM,Global linear,0.95,0.984,2.5805212363957613,0.4963137476876054,250
-rdflex,cutoff and score,LGBM,LGBM,0.9,0.94,2.201449781464929,0.5341193634721544,250
-rdflex,cutoff and score,LGBM,LGBM,0.95,0.972,2.6231892095114255,0.5341193634721544,250
-rdflex,cutoff and score,LGBM,Linear,0.9,0.952,2.1894694844073546,0.4635302319336836,250
-rdflex,cutoff and score,LGBM,Linear,0.95,0.98,2.6089138050789615,0.4635302319336836,250
-rdflex,cutoff and score,LGBM,Stacked,0.9,0.964,2.1361906429140674,0.5017184797294254,250
-rdflex,cutoff and score,LGBM,Stacked,0.95,0.984,2.545428149727124,0.5017184797294254,250
-rdflex,cutoff and score,Linear,Global linear,0.9,0.92,10.636843214150689,2.585401553398786,250
-rdflex,cutoff and score,Linear,Global linear,0.95,0.968,12.67458044128427,2.585401553398786,250
-rdflex,cutoff and score,Linear,LGBM,0.9,0.936,10.869584147939532,2.6903868750455233,250
-rdflex,cutoff and score,Linear,LGBM,0.95,0.98,12.95190836911928,2.6903868750455233,250
-rdflex,cutoff and score,Linear,Linear,0.9,0.912,10.715620665219745,2.5870008277605034,250
-rdflex,cutoff and score,Linear,Linear,0.95,0.968,12.76844956395835,2.5870008277605034,250
-rdflex,cutoff and score,Linear,Stacked,0.9,0.92,10.814063241507966,2.6455094849616714,250
-rdflex,cutoff and score,Linear,Stacked,0.95,0.964,12.88575112861359,2.6455094849616714,250
-rdflex,cutoff and score,Stacked,Global linear,0.9,0.932,2.1726374228239367,0.4855323537680247,250
-rdflex,cutoff and score,Stacked,Global linear,0.95,0.976,2.5888571666349667,0.4855323537680247,250
-rdflex,cutoff and score,Stacked,LGBM,0.9,0.952,2.14733586500187,0.5004783937020142,250
-rdflex,cutoff and score,Stacked,LGBM,0.95,0.972,2.5587085009595185,0.5004783937020142,250
-rdflex,cutoff and score,Stacked,Linear,0.9,0.948,2.202424304512329,0.49388698262577957,250
-rdflex,cutoff and score,Stacked,Linear,0.95,0.992,2.6243504253446837,0.49388698262577957,250
-rdflex,cutoff and score,Stacked,Stacked,0.9,0.956,2.23318991538815,0.4972797313578032,250
-rdflex,cutoff and score,Stacked,Stacked,0.95,0.98,2.6610099118126316,0.4972797313578032,250
-rdflex,interacted cutoff and score,Global linear,Global linear,0.9,0.932,10.545804244171494,2.539582402518461,250
-rdflex,interacted cutoff and score,Global linear,Global linear,0.95,0.964,12.566100817672075,2.539582402518461,250
-rdflex,interacted cutoff and score,Global linear,LGBM,0.9,0.932,10.884267411835014,2.650211237977455,250
-rdflex,interacted cutoff and score,Global linear,LGBM,0.95,0.972,12.969404557192851,2.650211237977455,250
-rdflex,interacted cutoff and score,Global linear,Linear,0.9,0.936,10.674123116042953,2.5790433126760175,250
-rdflex,interacted cutoff and score,Global linear,Linear,0.95,0.964,12.719002184264186,2.5790433126760175,250
-rdflex,interacted cutoff and score,Global linear,Stacked,0.9,0.932,10.626731756603398,2.6159768660130136,250
-rdflex,interacted cutoff and score,Global linear,Stacked,0.95,0.968,12.662531896478109,2.6159768660130136,250
-rdflex,interacted cutoff and score,LGBM,Global linear,0.9,0.936,2.1565580135903732,0.5032955415920486,250
-rdflex,interacted cutoff and score,LGBM,Global linear,0.95,0.98,2.569697368781784,0.5032955415920486,250
-rdflex,interacted cutoff and score,LGBM,LGBM,0.9,0.944,2.2329626116556094,0.5196735971141546,250
-rdflex,interacted cutoff and score,LGBM,LGBM,0.95,0.98,2.6607390627096894,0.5196735971141546,250
-rdflex,interacted cutoff and score,LGBM,Linear,0.9,0.94,2.188972984996604,0.5079568034409114,250
-rdflex,interacted cutoff and score,LGBM,Linear,0.95,0.988,2.608322189540976,0.5079568034409114,250
-rdflex,interacted cutoff and score,LGBM,Stacked,0.9,0.94,2.1435376000141364,0.5369566034254465,250
-rdflex,interacted cutoff and score,LGBM,Stacked,0.95,0.988,2.55418258907428,0.5369566034254465,250
-rdflex,interacted cutoff and score,Linear,Global linear,0.9,0.92,10.689912181088147,2.611324508403497,250
-rdflex,interacted cutoff and score,Linear,Global linear,0.95,0.968,12.737816015678167,2.611324508403497,250
-rdflex,interacted cutoff and score,Linear,LGBM,0.9,0.936,10.997728797394373,2.7029090494109984,250
-rdflex,interacted cutoff and score,Linear,LGBM,0.95,0.976,13.104602136897563,2.7029090494109984,250
-rdflex,interacted cutoff and score,Linear,Linear,0.9,0.936,10.779112620544133,2.610958342807748,250
-rdflex,interacted cutoff and score,Linear,Linear,0.95,0.968,12.844104895049702,2.610958342807748,250
-rdflex,interacted cutoff and score,Linear,Stacked,0.9,0.92,10.737943290197988,2.6468998393072347,250
-rdflex,interacted cutoff and score,Linear,Stacked,0.95,0.968,12.795048612214599,2.6468998393072347,250
-rdflex,interacted cutoff and score,Stacked,Global linear,0.9,0.932,2.233890329611388,0.5262031584833204,250
-rdflex,interacted cutoff and score,Stacked,Global linear,0.95,0.976,2.661844506836355,0.5262031584833204,250
-rdflex,interacted cutoff and score,Stacked,LGBM,0.9,0.936,2.20946398801941,0.5095892633671805,250
-rdflex,interacted cutoff and score,Stacked,LGBM,0.95,0.98,2.632738725622813,0.5095892633671805,250
-rdflex,interacted cutoff and score,Stacked,Linear,0.9,0.928,2.2332457187032886,0.49902647466338274,250
-rdflex,interacted cutoff and score,Stacked,Linear,0.95,0.968,2.661076405563868,0.49902647466338274,250
-rdflex,interacted cutoff and score,Stacked,Stacked,0.9,0.948,2.181302390767035,0.4919155381101698,250
-rdflex,interacted cutoff and score,Stacked,Stacked,0.95,0.984,2.599182112768406,0.4919155381101698,250
-rdrobust,cutoff,linear,linear,0.9,0.928,10.396400783944904,2.560336967798922,250
-rdrobust,cutoff,linear,linear,0.95,0.964,12.388075614449283,2.560336967798922,250
+Method,fs_specification,Learner g,Learner m,level,Coverage,CI Length,Bias,repetition
+RDFlex,cutoff,Global Linear,Global Logistic,0.9,0.914,159.78774916508115,16.371821196962774,1000
+RDFlex,cutoff,Global Linear,Global Logistic,0.95,0.9616666666666667,190.39884668322458,16.371821196962774,1000
+RDFlex,cutoff,LGBM Regr.,LGBM Clas.,0.9,0.911,21.894892395588858,2.054842519054099,1000
+RDFlex,cutoff,LGBM Regr.,LGBM Clas.,0.95,0.964,26.089373447939103,2.054842519054099,1000
+RDFlex,cutoff,Linear,Logistic,0.9,0.912,24.13350016941463,3.970042561943364,1000
+RDFlex,cutoff,Linear,Logistic,0.95,0.9606666666666667,28.756839136264052,3.970042561943364,1000
+RDFlex,cutoff,Stacked Regr.,Stacked Clas.,0.9,0.9203333333333333,3.9519734545980754,0.647703696648072,1000
+RDFlex,cutoff,Stacked Regr.,Stacked Clas.,0.95,0.9736666666666667,4.709066820265513,0.647703696648072,1000
+RDFlex,cutoff and score,Global Linear,Global Logistic,0.9,0.9136666666666666,40.229554938489734,4.971844764740165,1000
+RDFlex,cutoff and score,Global Linear,Global Logistic,0.95,0.961,47.9364713683679,4.971844764740165,1000
+RDFlex,cutoff and score,LGBM Regr.,LGBM Clas.,0.9,0.9146666666666666,16.86998628777336,1.5860785538090398,1000
+RDFlex,cutoff and score,LGBM Regr.,LGBM Clas.,0.95,0.9656666666666667,20.101828516499268,1.5860785538090398,1000
+RDFlex,cutoff and score,Linear,Logistic,0.9,0.914,97.14155501358066,9.810720713145727,1000
+RDFlex,cutoff and score,Linear,Logistic,0.95,0.963,115.75130218833239,9.810720713145727,1000
+RDFlex,cutoff and score,Stacked Regr.,Stacked Clas.,0.9,0.923,2.1887222402404873,0.5251851443672533,1000
+RDFlex,cutoff and score,Stacked Regr.,Stacked Clas.,0.95,0.9683333333333334,2.6080234087356517,0.5251851443672533,1000
+RDFlex,interacted cutoff and score,Global Linear,Global Logistic,0.9,0.916,39.62681032308488,5.974362235508999,1000
+RDFlex,interacted cutoff and score,Global Linear,Global Logistic,0.95,0.9616666666666667,47.218256860577036,5.974362235508999,1000
+RDFlex,interacted cutoff and score,LGBM Regr.,LGBM Clas.,0.9,0.9126666666666666,207.48089868228553,16.46141812561768,1000
+RDFlex,interacted cutoff and score,LGBM Regr.,LGBM Clas.,0.95,0.9626666666666667,247.22873952679137,16.46141812561768,1000
+RDFlex,interacted cutoff and score,Linear,Logistic,0.9,0.9143333333333333,1703.658231476743,157.0745025974768,1000
+RDFlex,interacted cutoff and score,Linear,Logistic,0.95,0.9626666666666667,2030.033992658808,157.0745025974768,1000
+RDFlex,interacted cutoff and score,Stacked Regr.,Stacked Clas.,0.9,0.9093333333333333,2.7930762710442028,0.5786138381787103,1000
+RDFlex,interacted cutoff and score,Stacked Regr.,Stacked Clas.,0.95,0.9663333333333334,3.328155653257759,0.5786138381787103,1000
+rdrobust,cutoff,Linear,Logistic,0.9,0.935,16.18988307541303,3.355681291457316,1000
+rdrobust,cutoff,Linear,Logistic,0.95,0.976,19.291435554988908,3.355681291457316,1000
diff --git a/results/rdd/rdd_fuzzy_coverage_metadata.csv b/results/rdd/rdd_fuzzy_coverage_metadata.csv
deleted file mode 100644
index 6c9e78a..0000000
--- a/results/rdd/rdd_fuzzy_coverage_metadata.csv
+++ /dev/null
@@ -1,2 +0,0 @@
-DoubleML Version,Script,Date,Total Runtime (seconds),Python Version
-0.9.dev0,rdd_fuzzy_coverage.py,2025-05-22 17:03:51,19875.749115228653,3.12.10
diff --git a/results/rdd/rdd_fuzzy_metadata.csv b/results/rdd/rdd_fuzzy_metadata.csv
new file mode 100644
index 0000000..ca7af26
--- /dev/null
+++ b/results/rdd/rdd_fuzzy_metadata.csv
@@ -0,0 +1,2 @@
+DoubleML Version,Script,Date,Total Runtime (minutes),Python Version,Config File
+0.11.dev0,RDDCoverageSimulation,2025-06-05 20:21,208.91020024220148,3.12.3,scripts/rdd/rdd_fuzzy_config.yml
diff --git a/results/rdd/rdd_sharp_config.yml b/results/rdd/rdd_sharp_config.yml
new file mode 100644
index 0000000..57d0a43
--- /dev/null
+++ b/results/rdd/rdd_sharp_config.yml
@@ -0,0 +1,43 @@
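+# Sharp RDD coverage settings (fuzzy: false): treatment is deterministic at
+# the cutoff, so only outcome learners (ml_g) are specified (no ml_m).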
+simulation_parameters:
+ repetitions: 1000
+ max_runtime: 19800
+ random_seed: 42
+ n_jobs: -2
+dgp_parameters:
+ n_obs:
+ - 1000
+ fuzzy:
+ - false
+ cutoff:
+ - 0.0
+learner_definitions:
+ lgbmr: &id001
+ name: LGBM Regr.
+ params:
+ n_estimators: 100
+ learning_rate: 0.05
+ global_linear: &id002
+ name: Global Linear
+ local_linear: &id003
+ name: Linear
+ stacked_reg: &id004
+ name: Stacked Regr.
+ params:
+ n_estimators: 100
+ learning_rate: 0.05
+dml_parameters:
+ fs_specification:
+ - cutoff
+ - cutoff and score
+ - interacted cutoff and score
+ learners:
+ - ml_g: *id001
+ - ml_g: *id002
+ - ml_g: *id003
+ - ml_g: *id004
+confidence_parameters:
+ level:
+ - 0.95
+ - 0.9
diff --git a/results/rdd/rdd_sharp_coverage.csv b/results/rdd/rdd_sharp_coverage.csv
index 558f06e..e6fe699 100644
--- a/results/rdd/rdd_sharp_coverage.csv
+++ b/results/rdd/rdd_sharp_coverage.csv
@@ -1,27 +1,27 @@
-Method,fs specification,Learner g,level,Coverage,CI Length,Bias,repetition
-rdflex,cutoff,Global linear,0.9,0.874,2.2011088216647567,0.5534513823633507,500
-rdflex,cutoff,Global linear,0.95,0.922,2.6227829308507813,0.5534513823633507,500
-rdflex,cutoff,LGBM,0.9,0.914,0.5720094363002372,0.1386055523323647,500
-rdflex,cutoff,LGBM,0.95,0.96,0.681591283014874,0.1386055523323647,500
-rdflex,cutoff,Linear,0.9,0.874,2.2135060296971463,0.558126057195101,500
-rdflex,cutoff,Linear,0.95,0.918,2.6375551153504855,0.558126057195101,500
-rdflex,cutoff,Stacked,0.9,0.9,0.5590842827226126,0.12977180379581715,500
-rdflex,cutoff,Stacked,0.95,0.964,0.6661900125968221,0.12977180379581715,500
-rdflex,cutoff and score,Global linear,0.9,0.876,2.2005650015589864,0.5551812535814453,500
-rdflex,cutoff and score,Global linear,0.95,0.922,2.622134929226859,0.5551812535814453,500
-rdflex,cutoff and score,LGBM,0.9,0.902,0.5984294788353389,0.14476299319184177,500
-rdflex,cutoff and score,LGBM,0.95,0.95,0.7130727054286046,0.14476299319184177,500
-rdflex,cutoff and score,Linear,0.9,0.87,2.212887444081595,0.5582781571131341,500
-rdflex,cutoff and score,Linear,0.95,0.922,2.63681802512679,0.5582781571131341,500
-rdflex,cutoff and score,Stacked,0.9,0.88,0.5820101876494705,0.14231610717047102,500
-rdflex,cutoff and score,Stacked,0.95,0.956,0.6935079132497274,0.14231610717047102,500
-rdflex,interacted cutoff and score,Global linear,0.9,0.878,2.202268208292408,0.5546663773642981,500
-rdflex,interacted cutoff and score,Global linear,0.95,0.926,2.6241644252264025,0.5546663773642981,500
-rdflex,interacted cutoff and score,LGBM,0.9,0.886,0.6002731299225453,0.151487055237104,500
-rdflex,interacted cutoff and score,LGBM,0.95,0.948,0.7152695511975984,0.151487055237104,500
-rdflex,interacted cutoff and score,Linear,0.9,0.88,2.2252512728137637,0.5541600195953326,500
-rdflex,interacted cutoff and score,Linear,0.95,0.916,2.651550435737079,0.5541600195953326,500
-rdflex,interacted cutoff and score,Stacked,0.9,0.904,0.5793073094025809,0.14171607962579613,500
-rdflex,interacted cutoff and score,Stacked,0.95,0.962,0.6902872351713266,0.14171607962579613,500
-rdrobust,cutoff,linear,0.9,0.874,2.1797037623552287,0.555486091306879,500
-rdrobust,cutoff,linear,0.95,0.916,2.597277229525019,0.555486091306879,500
+Method,fs_specification,Learner g,Learner m,level,Coverage,CI Length,Bias,repetition
+RDFlex,cutoff,Global Linear,N/A,0.9,0.8693333333333334,1.9777420628015956,0.5363309802914388,1000
+RDFlex,cutoff,Global Linear,N/A,0.95,0.9296666666666666,2.356625021391914,0.5363309802914388,1000
+RDFlex,cutoff,LGBM Regr.,N/A,0.9,0.8756666666666666,0.5745846597977408,0.15277974067846242,1000
+RDFlex,cutoff,LGBM Regr.,N/A,0.95,0.9296666666666666,0.6846598510774335,0.15277974067846242,1000
+RDFlex,cutoff,Linear,N/A,0.9,0.8666666666666666,1.991920387201889,0.5404127325625548,1000
+RDFlex,cutoff,Linear,N/A,0.95,0.9286666666666666,2.3735195369465925,0.5404127325625548,1000
+RDFlex,cutoff,Stacked Regr.,N/A,0.9,0.8813333333333334,0.5670086678915763,0.1471163883503396,1000
+RDFlex,cutoff,Stacked Regr.,N/A,0.95,0.9423333333333334,0.6756324999259701,0.1471163883503396,1000
+RDFlex,cutoff and score,Global Linear,N/A,0.9,0.868,1.9777819118149618,0.5362223371501555,1000
+RDFlex,cutoff and score,Global Linear,N/A,0.95,0.9283333333333333,2.3566725044200316,0.5362223371501555,1000
+RDFlex,cutoff and score,LGBM Regr.,N/A,0.9,0.8663333333333334,0.6047557690566959,0.16255491915792666,1000
+RDFlex,cutoff and score,LGBM Regr.,N/A,0.95,0.934,0.7206109451761669,0.16255491915792666,1000
+RDFlex,cutoff and score,Linear,N/A,0.9,0.869,1.99069360970139,0.5359824339657442,1000
+RDFlex,cutoff and score,Linear,N/A,0.95,0.932,2.372057741393101,0.5359824339657442,1000
+RDFlex,cutoff and score,Stacked Regr.,N/A,0.9,0.8926666666666666,0.5869731443946519,0.15208518895862003,1000
+RDFlex,cutoff and score,Stacked Regr.,N/A,0.95,0.9443333333333334,0.6994216409626386,0.15208518895862003,1000
+RDFlex,interacted cutoff and score,Global Linear,N/A,0.9,0.8666666666666666,1.9803466175426516,0.5369353192144897,1000
+RDFlex,interacted cutoff and score,Global Linear,N/A,0.95,0.928,2.359728539786857,0.5369353192144897,1000
+RDFlex,interacted cutoff and score,LGBM Regr.,N/A,0.9,0.884,0.6090642078915169,0.16109292443371379,1000
+RDFlex,interacted cutoff and score,LGBM Regr.,N/A,0.95,0.94,0.725744766695285,0.16109292443371379,1000
+RDFlex,interacted cutoff and score,Linear,N/A,0.9,0.8683333333333334,2.000108510613013,0.5391094961686808,1000
+RDFlex,interacted cutoff and score,Linear,N/A,0.95,0.9276666666666666,2.3832762877746387,0.5391094961686808,1000
+RDFlex,interacted cutoff and score,Stacked Regr.,N/A,0.9,0.8766666666666666,0.5858746023498046,0.15217399239455562,1000
+RDFlex,interacted cutoff and score,Stacked Regr.,N/A,0.95,0.934,0.6981126473791827,0.15217399239455562,1000
+rdrobust,cutoff,Linear,Logistic,0.9,0.888,2.18636563211321,0.5631032433381369,1000
+rdrobust,cutoff,Linear,Logistic,0.95,0.94,2.605215336953788,0.5631032433381369,1000
diff --git a/results/rdd/rdd_sharp_coverage_metadata.csv b/results/rdd/rdd_sharp_coverage_metadata.csv
deleted file mode 100644
index 586a02d..0000000
--- a/results/rdd/rdd_sharp_coverage_metadata.csv
+++ /dev/null
@@ -1,2 +0,0 @@
-DoubleML Version,Script,Date,Total Runtime (seconds),Python Version
-0.9.dev0,rdd_sharp_coverage.py,2025-05-22 12:43:00,4216.746794462204,3.12.10
diff --git a/results/rdd/rdd_sharp_metadata.csv b/results/rdd/rdd_sharp_metadata.csv
new file mode 100644
index 0000000..4c44c96
--- /dev/null
+++ b/results/rdd/rdd_sharp_metadata.csv
@@ -0,0 +1,2 @@
+DoubleML Version,Script,Date,Total Runtime (minutes),Python Version,Config File
+0.11.dev0,RDDCoverageSimulation,2025-06-05 17:58,65.60530270735423,3.12.3,scripts/rdd/rdd_sharp_config.yml
diff --git a/results/ssm/ssm_mar_ate_config.yml b/results/ssm/ssm_mar_ate_config.yml
new file mode 100644
index 0000000..6c5f926
--- /dev/null
+++ b/results/ssm/ssm_mar_ate_config.yml
@@ -0,0 +1,74 @@
+simulation_parameters:
+ repetitions: 1000
+ max_runtime: 19800
+ random_seed: 42
+ n_jobs: -2
+dgp_parameters:
+ theta:
+ - 1.0
+ n_obs:
+ - 500
+ dim_x:
+ - 20
+learner_definitions:
+ lasso: &id001
+ name: LassoCV
+ logit: &id002
+ name: Logistic
+ rfr: &id003
+ name: RF Regr.
+ params:
+ n_estimators: 200
+ max_features: 20
+ max_depth: 5
+ min_samples_leaf: 2
+ rfc: &id004
+ name: RF Clas.
+ params:
+ n_estimators: 200
+ max_features: 20
+ max_depth: 5
+ min_samples_leaf: 2
+ lgbmr: &id005
+ name: LGBM Regr.
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+ lgbmc: &id006
+ name: LGBM Clas.
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+dml_parameters:
+ learners:
+ - ml_g: *id001
+ ml_m: *id002
+ ml_pi: *id002
+ - ml_g: *id003
+ ml_m: *id004
+ ml_pi: *id004
+ - ml_g: *id001
+ ml_m: *id004
+ ml_pi: *id004
+ - ml_g: *id003
+ ml_m: *id002
+ ml_pi: *id004
+ - ml_g: *id003
+ ml_m: *id004
+ ml_pi: *id002
+ - ml_g: *id005
+ ml_m: *id006
+ ml_pi: *id006
+ - ml_g: *id001
+ ml_m: *id006
+ ml_pi: *id006
+ - ml_g: *id005
+ ml_m: *id002
+ ml_pi: *id006
+ - ml_g: *id005
+ ml_m: *id006
+ ml_pi: *id002
+confidence_parameters:
+ level:
+ - 0.95
+ - 0.9
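Each `learner_definitions` entry is just a display name plus estimator parameters. A hypothetical factory (the registry and function name below are illustrative, not montecover's actual API) would map such entries to scikit-learn/LightGBM estimators:

```python
# Hypothetical sketch of turning learner_definitions entries into estimators;
# montecover's real factory may differ in names and structure.
from lightgbm import LGBMClassifier, LGBMRegressor
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.linear_model import LassoCV, LogisticRegression

REGISTRY = {
    "LassoCV": LassoCV,
    "Logistic": LogisticRegression,
    "RF Regr.": RandomForestRegressor,
    "RF Clas.": RandomForestClassifier,
    "LGBM Regr.": LGBMRegressor,
    "LGBM Clas.": LGBMClassifier,
}

def make_learner(spec):
    # spec is one learner_definitions entry, e.g. {"name": "RF Regr.", "params": {...}}
    return REGISTRY[spec["name"]](**spec.get("params", {}))

ml_g = make_learner({"name": "RF Regr.", "params": {"n_estimators": 200, "max_depth": 5}})
```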
diff --git a/results/ssm/ssm_mar_ate_coverage.csv b/results/ssm/ssm_mar_ate_coverage.csv
new file mode 100644
index 0000000..aa8edb2
--- /dev/null
+++ b/results/ssm/ssm_mar_ate_coverage.csv
@@ -0,0 +1,19 @@
+Learner g,Learner m,Learner pi,level,Coverage,CI Length,Bias,repetition
+LGBM Regr.,LGBM Clas.,LGBM Clas.,0.9,0.934,1.0713352028098442,0.24591485806272242,1000
+LGBM Regr.,LGBM Clas.,LGBM Clas.,0.95,0.981,1.2765746316095508,0.24591485806272242,1000
+LGBM Regr.,LGBM Clas.,Logistic,0.9,0.939,0.9131848507685725,0.21223789879924182,1000
+LGBM Regr.,LGBM Clas.,Logistic,0.95,0.972,1.088126863939359,0.21223789879924182,1000
+LGBM Regr.,Logistic,LGBM Clas.,0.9,0.933,0.7703469411630964,0.17142872246581048,1000
+LGBM Regr.,Logistic,LGBM Clas.,0.95,0.972,0.9179249968148138,0.17142872246581048,1000
+LassoCV,LGBM Clas.,LGBM Clas.,0.9,0.947,1.0364590345690332,0.2359720559564468,1000
+LassoCV,LGBM Clas.,LGBM Clas.,0.95,0.982,1.2350171139370278,0.2359720559564468,1000
+LassoCV,Logistic,Logistic,0.9,0.926,0.5826714123685559,0.12863481417003114,1000
+LassoCV,Logistic,Logistic,0.95,0.965,0.6942958110990313,0.12863481417003114,1000
+LassoCV,RF Clas.,RF Clas.,0.9,0.919,0.5111034002250002,0.11799184761111325,1000
+LassoCV,RF Clas.,RF Clas.,0.95,0.956,0.6090172647602495,0.11799184761111325,1000
+RF Regr.,Logistic,RF Clas.,0.9,0.923,0.5773836889150485,0.13144778185362027,1000
+RF Regr.,Logistic,RF Clas.,0.95,0.963,0.687995099984517,0.13144778185362027,1000
+RF Regr.,RF Clas.,Logistic,0.9,0.923,0.5549423867573083,0.1256504508256171,1000
+RF Regr.,RF Clas.,Logistic,0.95,0.958,0.6612546391467519,0.1256504508256171,1000
+RF Regr.,RF Clas.,RF Clas.,0.9,0.922,0.5213838221703648,0.12121755103534768,1000
+RF Regr.,RF Clas.,RF Clas.,0.95,0.961,0.6212671430647002,0.12121755103534768,1000
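When reading the Coverage columns in these result files, the Monte Carlo error is worth keeping in mind: with 1000 repetitions, an empirical coverage rate has standard error sqrt(p(1-p)/1000). A quick illustrative check:

```python
# Illustrative check: a two-sigma band for empirical coverage under the
# nominal level, using a normal approximation with n_rep repetitions.
import math

def mc_two_sigma_band(level, n_rep=1000):
    se = math.sqrt(level * (1 - level) / n_rep)
    return (level - 2 * se, level + 2 * se)

print(mc_two_sigma_band(0.90))  # ~(0.881, 0.919)
print(mc_two_sigma_band(0.95))  # ~(0.936, 0.964)
```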
diff --git a/results/ssm/ssm_mar_ate_metadata.csv b/results/ssm/ssm_mar_ate_metadata.csv
new file mode 100644
index 0000000..b659c07
--- /dev/null
+++ b/results/ssm/ssm_mar_ate_metadata.csv
@@ -0,0 +1,2 @@
+DoubleML Version,Script,Date,Total Runtime (minutes),Python Version,Config File
+0.11.dev0,SSMMarATECoverageSimulation,2025-06-05 21:15,251.25987704992295,3.12.3,scripts/ssm/ssm_mar_ate_config.yml
diff --git a/results/ssm/ssm_nonig_ate_config.yml b/results/ssm/ssm_nonig_ate_config.yml
new file mode 100644
index 0000000..6c5f926
--- /dev/null
+++ b/results/ssm/ssm_nonig_ate_config.yml
@@ -0,0 +1,74 @@
+simulation_parameters:
+ repetitions: 1000
+ max_runtime: 19800
+ random_seed: 42
+ n_jobs: -2
+dgp_parameters:
+ theta:
+ - 1.0
+ n_obs:
+ - 500
+ dim_x:
+ - 20
+learner_definitions:
+ lasso: &id001
+ name: LassoCV
+ logit: &id002
+ name: Logistic
+ rfr: &id003
+ name: RF Regr.
+ params:
+ n_estimators: 200
+ max_features: 20
+ max_depth: 5
+ min_samples_leaf: 2
+ rfc: &id004
+ name: RF Clas.
+ params:
+ n_estimators: 200
+ max_features: 20
+ max_depth: 5
+ min_samples_leaf: 2
+ lgbmr: &id005
+ name: LGBM Regr.
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+ lgbmc: &id006
+ name: LGBM Clas.
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+dml_parameters:
+ learners:
+ - ml_g: *id001
+ ml_m: *id002
+ ml_pi: *id002
+ - ml_g: *id003
+ ml_m: *id004
+ ml_pi: *id004
+ - ml_g: *id001
+ ml_m: *id004
+ ml_pi: *id004
+ - ml_g: *id003
+ ml_m: *id002
+ ml_pi: *id004
+ - ml_g: *id003
+ ml_m: *id004
+ ml_pi: *id002
+ - ml_g: *id005
+ ml_m: *id006
+ ml_pi: *id006
+ - ml_g: *id001
+ ml_m: *id006
+ ml_pi: *id006
+ - ml_g: *id005
+ ml_m: *id002
+ ml_pi: *id006
+ - ml_g: *id005
+ ml_m: *id006
+ ml_pi: *id002
+confidence_parameters:
+ level:
+ - 0.95
+ - 0.9
diff --git a/results/ssm/ssm_nonig_ate_coverage.csv b/results/ssm/ssm_nonig_ate_coverage.csv
new file mode 100644
index 0000000..9a3d225
--- /dev/null
+++ b/results/ssm/ssm_nonig_ate_coverage.csv
@@ -0,0 +1,19 @@
+Learner g,Learner m,Learner pi,level,Coverage,CI Length,Bias,repetition
+LGBM Regr.,LGBM Clas.,LGBM Clas.,0.9,0.89,1.5301470087049076,0.3770578639809072,1000
+LGBM Regr.,LGBM Clas.,LGBM Clas.,0.95,0.942,1.823282618570531,0.3770578639809072,1000
+LGBM Regr.,LGBM Clas.,Logistic,0.9,0.929,2.4676110419059616,0.6723444365149791,1000
+LGBM Regr.,LGBM Clas.,Logistic,0.95,0.969,2.9403399127694723,0.6723444365149791,1000
+LGBM Regr.,Logistic,LGBM Clas.,0.9,0.809,1.0997736728076188,0.32081226177381494,1000
+LGBM Regr.,Logistic,LGBM Clas.,0.95,0.895,1.310461158688781,0.32081226177381494,1000
+LassoCV,LGBM Clas.,LGBM Clas.,0.9,0.902,1.4984436991476344,0.3690107638339736,1000
+LassoCV,LGBM Clas.,LGBM Clas.,0.95,0.961,1.785505795207747,0.3690107638339736,1000
+LassoCV,Logistic,Logistic,0.9,0.84,3.803087791117463,1.1219550707748396,1000
+LassoCV,Logistic,Logistic,0.95,0.916,4.531658609920874,1.1219550707748396,1000
+LassoCV,RF Clas.,RF Clas.,0.9,0.76,0.6487741040070446,0.20425601204335075,1000
+LassoCV,RF Clas.,RF Clas.,0.95,0.854,0.773062026383923,0.20425601204335075,1000
+RF Regr.,Logistic,RF Clas.,0.9,0.711,0.7424019979703704,0.26224742985370675,1000
+RF Regr.,Logistic,RF Clas.,0.95,0.816,0.8846265431954046,0.26224742985370675,1000
+RF Regr.,RF Clas.,Logistic,0.9,0.898,1.5259930175436052,0.4120022228090046,1000
+RF Regr.,RF Clas.,Logistic,0.95,0.958,1.818332832805496,0.4120022228090046,1000
+RF Regr.,RF Clas.,RF Clas.,0.9,0.759,0.6647246082851119,0.21200753632780625,1000
+RF Regr.,RF Clas.,RF Clas.,0.95,0.835,0.7920682245088012,0.21200753632780625,1000
diff --git a/results/ssm/ssm_nonig_ate_metadata.csv b/results/ssm/ssm_nonig_ate_metadata.csv
new file mode 100644
index 0000000..0eab540
--- /dev/null
+++ b/results/ssm/ssm_nonig_ate_metadata.csv
@@ -0,0 +1,2 @@
+DoubleML Version,Script,Date,Total Runtime (minutes),Python Version,Config File
+0.11.dev0,SSMNonIgnorableATECoverageSimulation,2025-06-05 19:37,152.50586200555165,3.12.3,scripts/ssm/ssm_nonig_ate_config.yml
diff --git a/scripts/did/did_cs_atte_coverage.py b/scripts/did/did_cs_atte_coverage.py
index 9be682b..25c3e77 100644
--- a/scripts/did/did_cs_atte_coverage.py
+++ b/scripts/did/did_cs_atte_coverage.py
@@ -26,7 +26,9 @@
for dgp_type in dgp_types:
datasets_dgp = []
for i in range(n_rep):
- data = make_did_SZ2020(n_obs=n_obs, dgp_type=dgp_type, cross_sectional_data=True)
+ data = make_did_SZ2020(
+ n_obs=n_obs, dgp_type=dgp_type, cross_sectional_data=True
+ )
datasets_dgp.append(data)
datasets.append(datasets_dgp)
@@ -36,9 +38,13 @@
"DGP": dgp_types,
"score": ["experimental", "observational"],
"in sample normalization": [True, False],
- "learner_g": [("LGBM", LGBMRegressor(verbose=-1)),],
- "learner_m": [("LGBM", LGBMClassifier(verbose=-1)),],
- "level": [0.95, 0.90]
+ "learner_g": [
+ ("LGBM", LGBMRegressor(verbose=-1)),
+ ],
+ "learner_m": [
+ ("LGBM", LGBMClassifier(verbose=-1)),
+ ],
+ "level": [0.95, 0.90],
}
# set up the results dataframe
@@ -61,17 +67,24 @@
# define the DoubleML data object
obj_dml_data = datasets[i_dgp][i_rep]
- for learner_g_idx, (learner_g_name, ml_g) in enumerate(hyperparam_dict["learner_g"]):
- for learner_m_idx, (learner_m_name, ml_m) in enumerate(hyperparam_dict["learner_m"]):
+ for learner_g_idx, (learner_g_name, ml_g) in enumerate(
+ hyperparam_dict["learner_g"]
+ ):
+ for learner_m_idx, (learner_m_name, ml_m) in enumerate(
+ hyperparam_dict["learner_m"]
+ ):
for score in hyperparam_dict["score"]:
- for in_sample_normalization in hyperparam_dict["in sample normalization"]:
+ for in_sample_normalization in hyperparam_dict[
+ "in sample normalization"
+ ]:
if score == "experimental":
dml_DiD = dml.DoubleMLDIDCS(
obj_dml_data=obj_dml_data,
ml_g=ml_g,
ml_m=None,
score=score,
- in_sample_normalization=in_sample_normalization)
+ in_sample_normalization=in_sample_normalization,
+ )
else:
assert score == "observational"
dml_DiD = dml.DoubleMLDIDCS(
@@ -79,37 +92,50 @@
ml_g=ml_g,
ml_m=ml_m,
score=score,
- in_sample_normalization=in_sample_normalization)
+ in_sample_normalization=in_sample_normalization,
+ )
dml_DiD.fit(n_jobs_cv=5)
for level_idx, level in enumerate(hyperparam_dict["level"]):
confint = dml_DiD.confint(level=level)
- coverage = (confint.iloc[0, 0] < theta) & (theta < confint.iloc[0, 1])
+ coverage = (confint.iloc[0, 0] < theta) & (
+ theta < confint.iloc[0, 1]
+ )
ci_length = confint.iloc[0, 1] - confint.iloc[0, 0]
df_results_detailed = pd.concat(
- (df_results_detailed,
- pd.DataFrame({
- "Coverage": coverage.astype(int),
- "CI Length": confint.iloc[0, 1] - confint.iloc[0, 0],
- "Bias": abs(dml_DiD.coef[0] - theta),
- "Learner g": learner_g_name,
- "Learner m": learner_m_name,
- "Score": score,
- "In-sample-norm.": in_sample_normalization,
- "DGP": dgp_type,
- "level": level,
- "repetition": i_rep}, index=[0])),
- ignore_index=True)
-
-df_results = df_results_detailed.groupby(
- ["Learner g", "Learner m", "Score", "In-sample-norm.", "DGP", "level"]).agg(
- {"Coverage": "mean",
- "CI Length": "mean",
- "Bias": "mean",
- "repetition": "count"}
- ).reset_index()
+ (
+ df_results_detailed,
+ pd.DataFrame(
+ {
+ "Coverage": coverage.astype(int),
+ "CI Length": confint.iloc[0, 1]
+ - confint.iloc[0, 0],
+ "Bias": abs(dml_DiD.coef[0] - theta),
+ "Learner g": learner_g_name,
+ "Learner m": learner_m_name,
+ "Score": score,
+ "In-sample-norm.": in_sample_normalization,
+ "DGP": dgp_type,
+ "level": level,
+ "repetition": i_rep,
+ },
+ index=[0],
+ ),
+ ),
+ ignore_index=True,
+ )
+
+df_results = (
+ df_results_detailed.groupby(
+ ["Learner g", "Learner m", "Score", "In-sample-norm.", "DGP", "level"]
+ )
+ .agg(
+ {"Coverage": "mean", "CI Length": "mean", "Bias": "mean", "repetition": "count"}
+ )
+ .reset_index()
+)
print(df_results)
end_time = time.time()
@@ -119,13 +145,17 @@
script_name = "did_cs_atte_coverage.py"
path = "results/did/did_cs_atte_coverage"
-metadata = pd.DataFrame({
- 'DoubleML Version': [dml.__version__],
- 'Script': [script_name],
- 'Date': [datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
- 'Total Runtime (seconds)': [total_runtime],
- 'Python Version': [f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"],
-})
+metadata = pd.DataFrame(
+ {
+ "DoubleML Version": [dml.__version__],
+ "Script": [script_name],
+ "Date": [datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
+ "Total Runtime (seconds)": [total_runtime],
+ "Python Version": [
+ f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"
+ ],
+ }
+)
print(metadata)
df_results.to_csv(f"{path}.csv", index=False)
diff --git a/scripts/did/did_cs_multi.py b/scripts/did/did_cs_multi.py
new file mode 100644
index 0000000..c0a6c38
--- /dev/null
+++ b/scripts/did/did_cs_multi.py
@@ -0,0 +1,13 @@
+from montecover.did import DIDCSMultiCoverageSimulation
+
+# Create and run simulation with config file
+sim = DIDCSMultiCoverageSimulation(
+ config_file="scripts/did/did_cs_multi_config.yml",
+ log_level="DEBUG",
+ log_file="logs/did/did_cs_multi_sim.log",
+)
+sim.run_simulation()
+sim.save_results(output_path="results/did/", file_prefix="did_cs_multi")
+
+# Save config file for reproducibility
+sim.save_config("results/did/did_cs_multi_config.yml")
diff --git a/scripts/did/did_cs_multi_config.yml b/scripts/did/did_cs_multi_config.yml
new file mode 100644
index 0000000..f1cdc06
--- /dev/null
+++ b/scripts/did/did_cs_multi_config.yml
@@ -0,0 +1,65 @@
+# Simulation parameters for DID CS Multi Coverage
+
+simulation_parameters:
+ repetitions: 500
+ max_runtime: 19800 # 5.5 hours in seconds
+ random_seed: 42
+ n_jobs: -2
+
+dgp_parameters:
+ DGP: [1, 4, 6] # Different DGP specifications
+ n_obs: [2000] # Sample size for each simulation (has to be a list)
+ lambda_t: [0.5]
+
+# Define reusable learner configurations
+learner_definitions:
+ linear: &linear
+ name: "Linear"
+
+ logistic: &logistic
+ name: "Logistic"
+
+ lgbmr: &lgbmr
+ name: "LGBM Regr."
+ params:
+ n_estimators: 300 # More trees to learn slowly and steadily
+ learning_rate: 0.03 # Lower learning rate to improve generalization
+ num_leaves: 7 # Fewer leaves — simpler trees
+ max_depth: 3 # Shallow trees reduce overfitting
+ min_child_samples: 20 # Require more samples per leaf
+ subsample: 0.8 # More row sampling to add randomness
+ colsample_bytree: 0.8 # More feature sampling
+ reg_alpha: 0.1 # Add L1 regularization
+ reg_lambda: 1.0 # Increase L2 regularization
+ random_state: 42 # Reproducible
+
+ lgbmc: &lgbmc
+ name: "LGBM Clas."
+ params:
+ n_estimators: 300 # More trees to learn slowly and steadily
+ learning_rate: 0.03 # Lower learning rate to improve generalization
+ num_leaves: 7 # Fewer leaves — simpler trees
+ max_depth: 3 # Shallow trees reduce overfitting
+ min_child_samples: 20 # Require more samples per leaf
+ subsample: 0.8 # More row sampling to add randomness
+ colsample_bytree: 0.8 # More feature sampling
+ reg_alpha: 0.1 # Add L1 regularization
+ reg_lambda: 1.0 # Increase L2 regularization
+ random_state: 42 # Reproducible
+
+dml_parameters:
+ # ML methods for ml_g and ml_m
+ learners:
+ - ml_g: *linear
+ ml_m: *logistic
+ - ml_g: *lgbmr
+ ml_m: *lgbmc
+
+ score:
+ - observational # Standard DML score
+ - experimental # Experimental score (no propensity estimation)
+
+ in_sample_normalization: [true, false]
+
+confidence_parameters:
+ level: [0.95, 0.90] # Confidence levels
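For orientation, the grid this config spans per repetition multiplies out as follows (illustrative arithmetic only; `n_obs` and `lambda_t` are single-valued):

```python
# Cells evaluated per repetition under did_cs_multi_config.yml:
# 3 DGPs x 2 learner pairs x 2 scores x 2 normalizations x 2 confidence levels.
n_dgp, n_learner_pairs, n_scores, n_norms, n_levels = 3, 2, 2, 2, 2
print(n_dgp * n_learner_pairs * n_scores * n_norms * n_levels)  # 48
```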
diff --git a/scripts/did/did_pa_atte_coverage.py b/scripts/did/did_pa_atte_coverage.py
index 144597a..eef92e7 100644
--- a/scripts/did/did_pa_atte_coverage.py
+++ b/scripts/did/did_pa_atte_coverage.py
@@ -26,7 +26,9 @@
for dgp_type in dgp_types:
datasets_dgp = []
for i in range(n_rep):
- data = make_did_SZ2020(n_obs=n_obs, dgp_type=dgp_type, cross_sectional_data=False)
+ data = make_did_SZ2020(
+ n_obs=n_obs, dgp_type=dgp_type, cross_sectional_data=False
+ )
datasets_dgp.append(data)
datasets.append(datasets_dgp)
@@ -36,9 +38,13 @@
"DGP": dgp_types,
"score": ["experimental", "observational"],
"in sample normalization": [True, False],
- "learner_g": [("LGBM", LGBMRegressor(verbose=-1)),],
- "learner_m": [("LGBM", LGBMClassifier(verbose=-1)),],
- "level": [0.95, 0.90]
+ "learner_g": [
+ ("LGBM", LGBMRegressor(verbose=-1)),
+ ],
+ "learner_m": [
+ ("LGBM", LGBMClassifier(verbose=-1)),
+ ],
+ "level": [0.95, 0.90],
}
# set up the results dataframe
@@ -61,17 +67,24 @@
# define the DoubleML data object
obj_dml_data = datasets[i_dgp][i_rep]
- for learner_g_idx, (learner_g_name, ml_g) in enumerate(hyperparam_dict["learner_g"]):
- for learner_m_idx, (learner_m_name, ml_m) in enumerate(hyperparam_dict["learner_m"]):
+ for learner_g_idx, (learner_g_name, ml_g) in enumerate(
+ hyperparam_dict["learner_g"]
+ ):
+ for learner_m_idx, (learner_m_name, ml_m) in enumerate(
+ hyperparam_dict["learner_m"]
+ ):
for score in hyperparam_dict["score"]:
- for in_sample_normalization in hyperparam_dict["in sample normalization"]:
+ for in_sample_normalization in hyperparam_dict[
+ "in sample normalization"
+ ]:
if score == "experimental":
dml_DiD = dml.DoubleMLDID(
obj_dml_data=obj_dml_data,
ml_g=ml_g,
ml_m=None,
score=score,
- in_sample_normalization=in_sample_normalization)
+ in_sample_normalization=in_sample_normalization,
+ )
else:
assert score == "observational"
dml_DiD = dml.DoubleMLDID(
@@ -79,36 +92,49 @@
ml_g=ml_g,
ml_m=ml_m,
score=score,
- in_sample_normalization=in_sample_normalization)
+ in_sample_normalization=in_sample_normalization,
+ )
dml_DiD.fit(n_jobs_cv=5)
for level_idx, level in enumerate(hyperparam_dict["level"]):
confint = dml_DiD.confint(level=level)
- coverage = (confint.iloc[0, 0] < theta) & (theta < confint.iloc[0, 1])
+ coverage = (confint.iloc[0, 0] < theta) & (
+ theta < confint.iloc[0, 1]
+ )
ci_length = confint.iloc[0, 1] - confint.iloc[0, 0]
df_results_detailed = pd.concat(
- (df_results_detailed,
- pd.DataFrame({
- "Coverage": coverage.astype(int),
- "CI Length": confint.iloc[0, 1] - confint.iloc[0, 0],
- "Bias": abs(dml_DiD.coef[0] - theta),
- "Learner g": learner_g_name,
- "Learner m": learner_m_name,
- "Score": score,
- "In-sample-norm.": in_sample_normalization,
- "DGP": dgp_type,
- "level": level,
- "repetition": i_rep}, index=[0])),
- ignore_index=True)
-
-df_results = df_results_detailed.groupby(
- ["Learner g", "Learner m", "Score", "In-sample-norm.", "DGP", "level"]).agg(
- {"Coverage": "mean",
- "CI Length": "mean",
- "Bias": "mean",
- "repetition": "count"}
- ).reset_index()
+ (
+ df_results_detailed,
+ pd.DataFrame(
+ {
+ "Coverage": coverage.astype(int),
+ "CI Length": confint.iloc[0, 1]
+ - confint.iloc[0, 0],
+ "Bias": abs(dml_DiD.coef[0] - theta),
+ "Learner g": learner_g_name,
+ "Learner m": learner_m_name,
+ "Score": score,
+ "In-sample-norm.": in_sample_normalization,
+ "DGP": dgp_type,
+ "level": level,
+ "repetition": i_rep,
+ },
+ index=[0],
+ ),
+ ),
+ ignore_index=True,
+ )
+
+df_results = (
+ df_results_detailed.groupby(
+ ["Learner g", "Learner m", "Score", "In-sample-norm.", "DGP", "level"]
+ )
+ .agg(
+ {"Coverage": "mean", "CI Length": "mean", "Bias": "mean", "repetition": "count"}
+ )
+ .reset_index()
+)
print(df_results)
end_time = time.time()
@@ -118,13 +144,17 @@
script_name = "did_pa_atte_coverage.py"
path = "results/did/did_pa_atte_coverage"
-metadata = pd.DataFrame({
- 'DoubleML Version': [dml.__version__],
- 'Script': [script_name],
- 'Date': [datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
- 'Total Runtime (seconds)': [total_runtime],
- 'Python Version': [f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"],
-})
+metadata = pd.DataFrame(
+ {
+ "DoubleML Version": [dml.__version__],
+ "Script": [script_name],
+ "Date": [datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
+ "Total Runtime (seconds)": [total_runtime],
+ "Python Version": [
+ f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"
+ ],
+ }
+)
print(metadata)
df_results.to_csv(f"{path}.csv", index=False)
diff --git a/scripts/did/did_pa_multi.py b/scripts/did/did_pa_multi.py
index f06a03e..5e90ee9 100644
--- a/scripts/did/did_pa_multi.py
+++ b/scripts/did/did_pa_multi.py
@@ -1,14 +1,13 @@
-
from montecover.did import DIDMultiCoverageSimulation
# Create and run simulation with config file
sim = DIDMultiCoverageSimulation(
config_file="scripts/did/did_pa_multi_config.yml",
log_level="DEBUG",
- log_file="logs/did/did_pa_multi_sim.log"
+ log_file="logs/did/did_pa_multi_sim.log",
)
sim.run_simulation()
-sim.save_results(output_path="results/did/", file_prefix="did_multi")
+sim.save_results(output_path="results/did/", file_prefix="did_pa_multi")
# Save config file for reproducibility
sim.save_config("results/did/did_pa_multi_config.yml")
diff --git a/scripts/did/did_pa_multi_config.yml b/scripts/did/did_pa_multi_config.yml
index 67eead1..2031a60 100644
--- a/scripts/did/did_pa_multi_config.yml
+++ b/scripts/did/did_pa_multi_config.yml
@@ -1,7 +1,7 @@
# Simulation parameters for DID Multi Coverage
simulation_parameters:
- repetitions: 1000
+ repetitions: 500
max_runtime: 19800 # 5.5 hours in seconds
random_seed: 42
n_jobs: -2
@@ -10,13 +10,49 @@ dgp_parameters:
DGP: [1, 4, 6] # Different DGP specifications
n_obs: [2000] # Sample size for each simulation (has to be a list)
+# Define reusable learner configurations
+learner_definitions:
+ linear: &linear
+ name: "Linear"
+
+ logistic: &logistic
+ name: "Logistic"
+
+ lgbmr: &lgbmr
+ name: "LGBM Regr."
+ params:
+ n_estimators: 300 # More trees to learn slowly and steadily
+ learning_rate: 0.03 # Lower learning rate to improve generalization
+ num_leaves: 7 # Fewer leaves — simpler trees
+ max_depth: 3 # Shallow trees reduce overfitting
+ min_child_samples: 20 # Require more samples per leaf
+ subsample: 0.8 # More row sampling to add randomness
+ colsample_bytree: 0.8 # More feature sampling
+ reg_alpha: 0.1 # Add L1 regularization
+ reg_lambda: 1.0 # Increase L2 regularization
+ random_state: 42 # Reproducible
+
+ lgbmc: &lgbmc
+ name: "LGBM Clas."
+ params:
+ n_estimators: 300 # More trees to learn slowly and steadily
+ learning_rate: 0.03 # Lower learning rate to improve generalization
+ num_leaves: 7 # Fewer leaves — simpler trees
+ max_depth: 3 # Shallow trees reduce overfitting
+ min_child_samples: 20 # Require more samples per leaf
+ subsample: 0.8 # More row sampling to add randomness
+ colsample_bytree: 0.8 # More feature sampling
+ reg_alpha: 0.1 # Add L1 regularization
+ reg_lambda: 1.0 # Increase L2 regularization
+ random_state: 42 # Reproducible
+
dml_parameters:
# ML methods for ml_g and ml_m
learners:
- - ml_g: ["Linear"]
- ml_m: ["Linear"]
- - ml_g: ["LGBM"]
- ml_m: ["LGBM"]
+ - ml_g: *linear
+ ml_m: *logistic
+ - ml_g: *lgbmr
+ ml_m: *lgbmc
score:
- observational # Standard DML score
diff --git a/scripts/irm/apo.py b/scripts/irm/apo.py
new file mode 100644
index 0000000..2821336
--- /dev/null
+++ b/scripts/irm/apo.py
@@ -0,0 +1,13 @@
+from montecover.irm import APOCoverageSimulation
+
+# Create and run simulation with config file
+sim = APOCoverageSimulation(
+ config_file="scripts/irm/apo_config.yml",
+ log_level="INFO",
+ log_file="logs/irm/apo_sim.log",
+)
+sim.run_simulation()
+sim.save_results(output_path="results/irm/", file_prefix="apo")
+
+# Save config file for reproducibility
+sim.save_config("results/irm/apo_config.yml")
diff --git a/scripts/irm/apo_config.yml b/scripts/irm/apo_config.yml
new file mode 100644
index 0000000..511907a
--- /dev/null
+++ b/scripts/irm/apo_config.yml
@@ -0,0 +1,51 @@
+# Simulation parameters for APO Coverage
+
+simulation_parameters:
+ repetitions: 1000
+ max_runtime: 19800 # 5.5 hours in seconds
+ random_seed: 42
+ n_jobs: -2
+
+dgp_parameters:
+ n_obs: [500] # Sample size
+ n_levels: [2]
+ linear: [True]
+
+# Define reusable learner configurations
+learner_definitions:
+ linear: &linear
+ name: "Linear"
+
+ logit: &logit
+ name: "Logistic"
+
+ lgbmr: &lgbmr
+ name: "LGBM Regr."
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+ min_child_samples: 10
+
+ lgbmc: &lgbmc
+ name: "LGBM Clas."
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+ min_child_samples: 10
+
+dml_parameters:
+ treatment_level: [0, 1, 2]
+ trimming_threshold: [0.01]
+ learners:
+ - ml_g: *linear
+ ml_m: *logit
+ - ml_g: *lgbmr
+ ml_m: *lgbmc
+ - ml_g: *lgbmr
+ ml_m: *logit
+ - ml_g: *linear
+ ml_m: *lgbmc
+
+
+confidence_parameters:
+ level: [0.95, 0.90] # Confidence levels
diff --git a/scripts/irm/apos.py b/scripts/irm/apos.py
new file mode 100644
index 0000000..e26657c
--- /dev/null
+++ b/scripts/irm/apos.py
@@ -0,0 +1,13 @@
+from montecover.irm import APOSCoverageSimulation
+
+# Create and run simulation with config file
+sim = APOSCoverageSimulation(
+ config_file="scripts/irm/apos_config.yml",
+ log_level="INFO",
+ log_file="logs/irm/apos_sim.log",
+)
+sim.run_simulation()
+sim.save_results(output_path="results/irm/", file_prefix="apos")
+
+# Save config file for reproducibility
+sim.save_config("results/irm/apos_config.yml")
diff --git a/scripts/irm/apos_config.yml b/scripts/irm/apos_config.yml
new file mode 100644
index 0000000..e7102c5
--- /dev/null
+++ b/scripts/irm/apos_config.yml
@@ -0,0 +1,51 @@
+# Simulation parameters for APOS Coverage
+
+simulation_parameters:
+ repetitions: 1000
+ max_runtime: 19800 # 5.5 hours in seconds
+ random_seed: 42
+ n_jobs: -2
+
+dgp_parameters:
+ n_obs: [500] # Sample size
+ n_levels: [2]
+ linear: [True]
+
+# Define reusable learner configurations
+learner_definitions:
+ linear: &linear
+ name: "Linear"
+
+ logit: &logit
+ name: "Logistic"
+
+ lgbmr: &lgbmr
+ name: "LGBM Regr."
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+ min_child_samples: 10
+
+ lgbmc: &lgbmc
+ name: "LGBM Clas."
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+ min_child_samples: 10
+
+dml_parameters:
+ treatment_levels: [[0, 1, 2]]
+ trimming_threshold: [0.01]
+ learners:
+ - ml_g: *linear
+ ml_m: *logit
+ - ml_g: *lgbmr
+ ml_m: *lgbmc
+ - ml_g: *lgbmr
+ ml_m: *logit
+ - ml_g: *linear
+ ml_m: *lgbmc
+
+
+confidence_parameters:
+ level: [0.95, 0.90] # Confidence levels
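Note the deliberate difference between the two configs: `apo_config.yml` sweeps a scalar `treatment_level` (one `DoubleMLAPO` model per level), while `apos_config.yml` passes a single list under `treatment_levels` (one joint `DoubleMLAPOS` model). The sketch below mirrors the API usage and data setup from the deleted `irm_apo_coverage.py` further down; the linear learners stand in for any of the configured ones:

```python
# Sketch contrasting the two configs, with data setup as in the deleted
# irm_apo_coverage.py.
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression, LogisticRegression

import doubleml as dml
from doubleml.datasets import make_irm_data_discrete_treatments

data_apo = make_irm_data_discrete_treatments(n_obs=500, n_levels=2, linear=True)
df_apo = pd.DataFrame(
    np.column_stack((data_apo["y"], data_apo["d"], data_apo["x"])),
    columns=["y", "d"] + ["x" + str(i) for i in range(data_apo["x"].shape[1])],
)
obj_dml_data = dml.DoubleMLData(df_apo, "y", "d")

# apo_config.yml: treatment_level is a scalar swept over [0, 1, 2],
# i.e. one DoubleMLAPO model per level
dml_apo = dml.DoubleMLAPO(
    obj_dml_data=obj_dml_data, ml_g=LinearRegression(), ml_m=LogisticRegression(),
    treatment_level=1, trimming_threshold=0.01,
)

# apos_config.yml: treatment_levels is one list,
# i.e. a single joint DoubleMLAPOS model over all levels
dml_apos = dml.DoubleMLAPOS(
    obj_dml_data=obj_dml_data, ml_g=LinearRegression(), ml_m=LogisticRegression(),
    treatment_levels=[0, 1, 2], trimming_threshold=0.01,
)
```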
diff --git a/scripts/irm/cvar.py b/scripts/irm/cvar.py
new file mode 100644
index 0000000..2195294
--- /dev/null
+++ b/scripts/irm/cvar.py
@@ -0,0 +1,13 @@
+from montecover.irm import CVARCoverageSimulation
+
+# Create and run simulation with config file
+sim = CVARCoverageSimulation(
+ config_file="scripts/irm/cvar_config.yml",
+ log_level="INFO",
+ log_file="logs/irm/cvar_sim.log",
+)
+sim.run_simulation()
+sim.save_results(output_path="results/irm/", file_prefix="cvar")
+
+# Save config file for reproducibility
+sim.save_config("results/irm/cvar_config.yml")
diff --git a/scripts/irm/cvar_config.yml b/scripts/irm/cvar_config.yml
new file mode 100644
index 0000000..f3531b4
--- /dev/null
+++ b/scripts/irm/cvar_config.yml
@@ -0,0 +1,63 @@
+# Simulation parameters for CVaR Coverage
+
+simulation_parameters:
+ repetitions: 200
+ max_runtime: 19800 # 5.5 hours in seconds
+ random_seed: 42
+ n_jobs: -2
+
+dgp_parameters:
+ n_obs: [5000] # Sample size
+ dim_x: [5] # Number of covariates
+
+# Define reusable learner configurations
+learner_definitions:
+ linear: &linear
+ name: "Linear"
+
+ logit: &logit
+ name: "Logistic"
+
+ lgbmr: &lgbmr
+ name: "LGBM Regr."
+ params:
+ n_estimators: 200 # Fewer trees — faster
+ learning_rate: 0.05 # Balanced speed and stability
+ num_leaves: 15 # Modest complexity for smaller data
+ max_depth: 5 # Limit tree depth to avoid overfitting
+ min_child_samples: 10 # Minimum samples per leaf — conservative
+ subsample: 0.9 # Slightly randomized rows
+ colsample_bytree: 0.9 # Slightly randomized features
+ reg_alpha: 0.0 # No L1 regularization (faster)
+ reg_lambda: 0.1 # Light L2 regularization
+ random_state: 42 # Reproducible
+
+ lgbmc: &lgbmc
+ name: "LGBM Clas."
+ params:
+ n_estimators: 200 # Fewer trees — faster
+ learning_rate: 0.05 # Balanced speed and stability
+ num_leaves: 15 # Modest complexity for smaller data
+ max_depth: 5 # Limit tree depth to avoid overfitting
+ min_child_samples: 10 # Minimum samples per leaf — conservative
+ subsample: 0.9 # Slightly randomized rows
+ colsample_bytree: 0.9 # Slightly randomized features
+ reg_alpha: 0.0 # No L1 regularization (faster)
+ reg_lambda: 0.1 # Light L2 regularization
+ random_state: 42 # Reproducible
+
+dml_parameters:
+ tau_vec: [[0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]] # Quantiles
+ trimming_threshold: [0.01]
+ learners:
+ - ml_g: *linear
+ ml_m: *logit
+ - ml_g: *lgbmr
+ ml_m: *lgbmc
+ - ml_g: *lgbmr
+ ml_m: *logit
+ - ml_g: *linear
+ ml_m: *lgbmc
+
+confidence_parameters:
+ level: [0.95, 0.90] # Confidence levels
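The oracle targets for this simulation (computed in the deleted `cvar_coverage.py` below) define the CVaR at level tau as the mean of the potential outcome above its tau-quantile. Restated minimally:

```python
# Minimal restatement of the oracle CVaR computation used in the
# deleted cvar_coverage.py: E[Y | Y >= Q_tau(Y)].
import numpy as np

def cvar(y, tau):
    # Mean of y above its tau-quantile
    q = np.quantile(y, tau)
    return y[y >= q].mean()

rng = np.random.default_rng(42)
y = rng.standard_normal(1_000_000)
print(cvar(y, 0.5))  # approx sqrt(2/pi) ~ 0.798 for a standard normal
```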
diff --git a/scripts/irm/cvar_coverage.py b/scripts/irm/cvar_coverage.py
deleted file mode 100644
index 4a166d9..0000000
--- a/scripts/irm/cvar_coverage.py
+++ /dev/null
@@ -1,242 +0,0 @@
-import numpy as np
-import pandas as pd
-import multiprocessing
-from datetime import datetime
-import time
-import sys
-
-from sklearn.linear_model import LogisticRegressionCV, LinearRegression
-from lightgbm import LGBMClassifier, LGBMRegressor
-
-import doubleml as dml
-
-# set up parallelization
-n_cores = multiprocessing.cpu_count()
-print(f"Number of Cores: {n_cores}")
-cores_used = n_cores-1
-
-# Number of repetitions
-n_rep = 100
-max_runtime = 5.5 * 3600 # 5.5 hours in seconds
-
-# DGP pars
-n_obs = 5000
-tau_vec = np.arange(0.2, 0.85, 0.05)
-p = 5
-
-
-# define loc-scale model
-def f_loc(D, X):
- loc = 0.5*D + 2*D*X[:, 4] + 2.0*(X[:, 1] > 0.1) - 1.7*(X[:, 0] * X[:, 2] > 0) - 3*X[:, 3]
- return loc
-
-
-def f_scale(D, X):
- scale = np.sqrt(0.5*D + 0.3*D*X[:, 1] + 2)
- return scale
-
-
-def dgp(n=200, p=5):
- X = np.random.uniform(-1, 1, size=[n, p])
- D = ((X[:, 1] - X[:, 3] + 1.5*(X[:, 0] > 0) + np.random.normal(size=n)) > 0)*1.0
- epsilon = np.random.normal(size=n)
-
- Y = f_loc(D, X) + f_scale(D, X)*epsilon
- return Y, X, D, epsilon
-
-
-# Estimate true and QTE with counterfactuals on large sample
-n_true = int(10e+6)
-
-_, X_true, _, epsilon_true = dgp(n=n_true, p=p)
-D1 = np.ones(n_true)
-D0 = np.zeros(n_true)
-
-Y1 = f_loc(D1, X_true) + f_scale(D1, X_true)*epsilon_true
-Y0 = f_loc(D0, X_true) + f_scale(D0, X_true)*epsilon_true
-
-Y1_quant = np.quantile(Y1, q=tau_vec)
-Y0_quant = np.quantile(Y0, q=tau_vec)
-Y1_cvar = [Y1[Y1 >= quant].mean() for quant in Y1_quant]
-Y0_cvar = [Y0[Y0 >= quant].mean() for quant in Y0_quant]
-CVAR = np.array(Y1_cvar) - np.array(Y0_cvar)
-
-print(f'Conditional Value at Risk Y(0): {Y0_cvar}')
-print(f'Conditional Value at Risk Y(1): {Y1_cvar}')
-print(f'Conditional Value at Risk Effect: {CVAR}')
-
-# to get the best possible comparison between different learners (and settings) we first simulate all datasets
-np.random.seed(42)
-datasets = []
-for i in range(n_rep):
- Y, X, D, _ = dgp(n=n_obs, p=p)
- data = dml.DoubleMLData.from_arrays(X, Y, D)
- datasets.append(data)
-
-# set up hyperparameters
-hyperparam_dict = {
- "learner_g": [("Linear", LinearRegression()),
- ("LGBM", LGBMRegressor(n_estimators=300, learning_rate=0.05, num_leaves=10, verbose=-1))],
- "learner_m": [("Logistic Regression", LogisticRegressionCV()),
- ("LGBM", LGBMClassifier(n_estimators=300, learning_rate=0.05, num_leaves=10, verbose=-1))],
- "level": [0.95, 0.90]
-}
-
-# set up the results dataframe
-df_results_detailed_qte = pd.DataFrame()
-df_results_detailed_pq0 = pd.DataFrame()
-df_results_detailed_pq1 = pd.DataFrame()
-
-# start simulation
-np.random.seed(42)
-start_time = time.time()
-
-for i_rep in range(n_rep):
- print(f"Repetition: {i_rep + 1}/{n_rep}", end="\r")
-
- # Check the elapsed time
- elapsed_time = time.time() - start_time
- if elapsed_time > max_runtime:
- print("Maximum runtime exceeded. Stopping the simulation.")
- break
-
- # define the DoubleML data object
- obj_dml_data = datasets[i_rep]
-
- for learner_g_idx, (learner_g_name, ml_g) in enumerate(hyperparam_dict["learner_g"]):
- for learner_m_idx, (learner_m_name, ml_m) in enumerate(hyperparam_dict["learner_m"]):
- # Set machine learning methods for g & m
- dml_qte = dml.DoubleMLQTE(
- obj_dml_data=obj_dml_data,
- ml_g=ml_g,
- ml_m=ml_m,
- score="CVaR",
- quantiles=tau_vec
- )
- dml_qte.fit(n_jobs_models=cores_used)
- effects = dml_qte.coef
-
- for level_idx, level in enumerate(hyperparam_dict["level"]):
- confint = dml_qte.confint(level=level)
- coverage = np.mean((confint.iloc[:, 0] < CVAR) & (CVAR < confint.iloc[:, 1]))
- ci_length = np.mean(confint.iloc[:, 1] - confint.iloc[:, 0])
-
- dml_qte.bootstrap(n_rep_boot=2000)
- confint_uniform = dml_qte.confint(level=level, joint=True)
- coverage_uniform = all((confint_uniform.iloc[:, 0] < CVAR) &
- (CVAR < confint_uniform.iloc[:, 1]))
- ci_length_uniform = np.mean(confint_uniform.iloc[:, 1] - confint_uniform.iloc[:, 0])
- df_results_detailed_qte = pd.concat(
- (df_results_detailed_qte,
- pd.DataFrame({
- "Coverage": coverage,
- "CI Length": ci_length,
- "Bias": np.mean(abs(effects - CVAR)),
- "Uniform Coverage": coverage_uniform,
- "Uniform CI Length": ci_length_uniform,
- "Learner g": learner_g_name,
- "Learner m": learner_m_name,
- "level": level,
- "repetition": i_rep}, index=[0])),
- ignore_index=True)
-
- # evaluate each model
- coverage_0 = np.zeros(len(tau_vec))
- coverage_1 = np.zeros(len(tau_vec))
-
- ci_length_0 = np.zeros(len(tau_vec))
- ci_length_1 = np.zeros(len(tau_vec))
-
- bias_0 = np.zeros(len(tau_vec))
- bias_1 = np.zeros(len(tau_vec))
- for tau_idx, tau in enumerate(tau_vec):
- model_0 = dml_qte.modellist_0[tau_idx]
- model_1 = dml_qte.modellist_1[tau_idx]
-
- confint_0 = model_0.confint(level=level)
- confint_1 = model_1.confint(level=level)
-
- coverage_0[tau_idx] = (confint_0.iloc[0, 0] < Y0_cvar[tau_idx]) & \
- (Y0_cvar[tau_idx] < confint_0.iloc[0, 1])
- coverage_1[tau_idx] = (confint_1.iloc[0, 0] < Y1_cvar[tau_idx]) & \
- (Y1_cvar[tau_idx] < confint_1.iloc[0, 1])
-
- ci_length_0[tau_idx] = confint_0.iloc[0, 1] - confint_0.iloc[0, 0]
- ci_length_1[tau_idx] = confint_1.iloc[0, 1] - confint_1.iloc[0, 0]
-
- bias_0[tau_idx] = abs(model_0.coef[0] - Y0_cvar[tau_idx])
- bias_1[tau_idx] = abs(model_1.coef[0] - Y1_cvar[tau_idx])
-
- df_results_detailed_pq0 = pd.concat(
- (df_results_detailed_pq0,
- pd.DataFrame({
- "Coverage": np.mean(coverage_0),
- "CI Length": np.mean(ci_length_0),
- "Bias": np.mean(bias_0),
- "Learner g": learner_g_name,
- "Learner m": learner_m_name,
- "level": level,
- "repetition": i_rep}, index=[0])),
- ignore_index=True)
-
- df_results_detailed_pq1 = pd.concat(
- (df_results_detailed_pq1,
- pd.DataFrame({
- "Coverage": np.mean(coverage_1),
- "CI Length": np.mean(ci_length_1),
- "Bias": np.mean(bias_1),
- "Learner g": learner_g_name,
- "Learner m": learner_m_name,
- "level": level,
- "repetition": i_rep}, index=[0])),
- ignore_index=True)
-
-df_results_qte = df_results_detailed_qte.groupby(
- ["Learner g", "Learner m", "level"]).agg(
- {"Coverage": "mean",
- "CI Length": "mean",
- "Bias": "mean",
- "Uniform Coverage": "mean",
- "Uniform CI Length": "mean",
- "repetition": "count"}
- ).reset_index()
-print(df_results_qte)
-
-df_results_pq0 = df_results_detailed_pq0.groupby(
- ["Learner g", "Learner m", "level"]).agg(
- {"Coverage": "mean",
- "CI Length": "mean",
- "Bias": "mean",
- "repetition": "count"}
- ).reset_index()
-print(df_results_pq0)
-
-df_results_pq1 = df_results_detailed_pq1.groupby(
- ["Learner g", "Learner m", "level"]).agg(
- {"Coverage": "mean",
- "CI Length": "mean",
- "Bias": "mean",
- "repetition": "count"}
- ).reset_index()
-print(df_results_pq1)
-
-end_time = time.time()
-total_runtime = end_time - start_time
-
-# save results
-script_name = "cvar_coverage.py"
-path = "results/irm/cvar_coverage"
-
-metadata = pd.DataFrame({
- 'DoubleML Version': [dml.__version__],
- 'Script': [script_name],
- 'Date': [datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
- 'Total Runtime (seconds)': [total_runtime],
- 'Python Version': [f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"],
-})
-print(metadata)
-
-df_results_qte.to_csv(f"{path}_qte.csv", index=False)
-df_results_pq0.to_csv(f"{path}_pq0.csv", index=False)
-df_results_pq1.to_csv(f"{path}_pq1.csv", index=False)
-metadata.to_csv(f"{path}_metadata.csv", index=False)
diff --git a/scripts/irm/iivm_late.py b/scripts/irm/iivm_late.py
new file mode 100644
index 0000000..c6b7942
--- /dev/null
+++ b/scripts/irm/iivm_late.py
@@ -0,0 +1,13 @@
+from montecover.irm import IIVMLATECoverageSimulation
+
+# Create and run simulation with config file
+sim = IIVMLATECoverageSimulation(
+ config_file="scripts/irm/iivm_late_config.yml",
+ log_level="INFO",
+ log_file="logs/irm/iivm_late_sim.log",
+)
+sim.run_simulation()
+sim.save_results(output_path="results/irm/", file_prefix="iivm_late")
+
+# Save config file for reproducibility
+sim.save_config("results/irm/iivm_late_config.yml")
diff --git a/scripts/irm/iivm_late_config.yml b/scripts/irm/iivm_late_config.yml
new file mode 100644
index 0000000..b81c856
--- /dev/null
+++ b/scripts/irm/iivm_late_config.yml
@@ -0,0 +1,80 @@
+# Simulation parameters for IIVM LATE Coverage
+
+simulation_parameters:
+ repetitions: 1000
+ max_runtime: 19800 # 5.5 hours in seconds
+ random_seed: 42
+ n_jobs: -2
+
+dgp_parameters:
+ theta: [0.5] # Treatment effect
+ n_obs: [500] # Sample size
+ dim_x: [20] # Number of covariates
+ alpha_x: [1.0] # Covariate effect
+
+# Define reusable learner configurations
+learner_definitions:
+ lasso: &lasso
+ name: "LassoCV"
+
+ logit: &logit
+ name: "Logistic"
+
+ lgbmr: &lgbmr
+ name: "LGBM Regr."
+ params:
+ n_estimators: 100 # Fewer trees; with small data, fewer is often better
+ learning_rate: 0.05 # Reasonable speed without sacrificing much accuracy
+ num_leaves: 7 # Smaller trees reduce overfitting risk
+ max_depth: 3 # Shallow trees generalize better on tiny datasets
+ min_child_samples: 20 # Avoids splitting on noise
+ subsample: 1.0 # Use all rows — subsampling adds variance with small data
+ colsample_bytree: 0.8 # Still good to randomly drop some features per tree
+ reg_alpha: 0.1 # L1 regularization helps when there are many features
+ reg_lambda: 1.0 # Stronger L2 regularization improves generalization
+ random_state: 42 # Reproducibility
+
+ lgbmc: &lgbmc
+ name: "LGBM Clas."
+ params:
+ n_estimators: 100 # Fewer trees; with small data, fewer is often better
+ learning_rate: 0.05 # Reasonable speed without sacrificing much accuracy
+ num_leaves: 7 # Smaller trees reduce overfitting risk
+ max_depth: 3 # Shallow trees generalize better on tiny datasets
+ min_child_samples: 20 # Avoids splitting on noise
+ subsample: 1.0 # Use all rows — subsampling adds variance with small data
+ colsample_bytree: 0.8 # Still good to randomly drop some features per tree
+ reg_alpha: 0.1 # L1 regularization helps when there are many features
+ reg_lambda: 1.0 # Stronger L2 regularization improves generalization
+ random_state: 42 # Reproducibility
+
+dml_parameters:
+ learners:
+ - ml_g: *lasso
+ ml_m: *logit
+ ml_r: *logit
+ - ml_g: *lasso
+ ml_m: *logit
+ ml_r: *lgbmc
+ - ml_g: *lasso
+ ml_m: *lgbmc
+ ml_r: *logit
+ - ml_g: *lasso
+ ml_m: *lgbmc
+ ml_r: *lgbmc
+ - ml_g: *lgbmr
+ ml_m: *logit
+ ml_r: *logit
+ - ml_g: *lgbmr
+ ml_m: *logit
+ ml_r: *lgbmc
+ - ml_g: *lgbmr
+ ml_m: *lgbmc
+ ml_r: *logit
+ - ml_g: *lgbmr
+ ml_m: *lgbmc
+ ml_r: *lgbmc
+
+
+confidence_parameters:
+ level: [0.95, 0.90] # Confidence levels
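The eight `learners` entries above are exactly the full cross of two choices per nuisance slot ({LassoCV, LGBM Regr.} for `ml_g`, {Logistic, LGBM Clas.} for `ml_m` and `ml_r`). An equivalent programmatic enumeration (illustrative only, not how montecover reads the config):

```python
# Illustrative: the config's eight learner entries as a full cross product.
from itertools import product

g_options = ["lasso", "lgbmr"]
m_options = ["logit", "lgbmc"]
r_options = ["logit", "lgbmc"]

learners = [
    {"ml_g": g, "ml_m": m, "ml_r": r}
    for g, m, r in product(g_options, m_options, r_options)
]
assert len(learners) == 8
```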
diff --git a/scripts/irm/iivm_late_coverage.py b/scripts/irm/iivm_late_coverage.py
deleted file mode 100644
index 05620e0..0000000
--- a/scripts/irm/iivm_late_coverage.py
+++ /dev/null
@@ -1,114 +0,0 @@
-import numpy as np
-import pandas as pd
-from datetime import datetime
-import time
-import sys
-
-from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
-from sklearn.linear_model import LassoCV, LogisticRegressionCV
-
-import doubleml as dml
-from doubleml.datasets import make_iivm_data
-
-# Number of repetitions
-n_rep = 1000
-max_runtime = 5.5 * 3600 # 5.5 hours in seconds
-
-# DGP pars
-theta = 0.5
-n_obs = 500
-dim_x = 20
-alpha_x = 1.0
-
-# to get the best possible comparison between different learners (and settings) we first simulate all datasets
-np.random.seed(42)
-datasets = []
-for i in range(n_rep):
- data = make_iivm_data(theta=theta, n_obs=n_obs, dim_x=dim_x, alpha_x=alpha_x, return_type='DataFrame')
- datasets.append(data)
-
-# set up hyperparameters
-hyperparam_dict = {
- "learner_g": [("Lasso", LassoCV()),
- ("Random Forest",
- RandomForestRegressor(n_estimators=100, max_features=20, max_depth=5, min_samples_leaf=2))],
- "learner_m": [("Logistic Regression", LogisticRegressionCV()),
- ("Random Forest",
- RandomForestClassifier(n_estimators=100, max_features=20, max_depth=5, min_samples_leaf=2))],
- "level": [0.95, 0.90]
-}
-
-# set up the results dataframe
-df_results_detailed = pd.DataFrame()
-
-# start simulation
-np.random.seed(42)
-start_time = time.time()
-
-for i_rep in range(n_rep):
- print(f"Repetition: {i_rep}/{n_rep}", end="\r")
-
- # Check the elapsed time
- elapsed_time = time.time() - start_time
- if elapsed_time > max_runtime:
- print("Maximum runtime exceeded. Stopping the simulation.")
- break
-
- # define the DoubleML data object
- obj_dml_data = dml.DoubleMLData(datasets[i_rep], 'y', 'd', z_cols='z')
-
- for learner_g_idx, (learner_g_name, ml_g) in enumerate(hyperparam_dict["learner_g"]):
- for learner_m_idx, (learner_m_name, ml_m) in enumerate(hyperparam_dict["learner_m"]):
- # Set machine learning methods for g & m
- dml_iivm = dml.DoubleMLIIVM(
- obj_dml_data=obj_dml_data,
- ml_g=ml_g,
- ml_m=ml_m,
- ml_r=ml_m,
- )
- dml_iivm.fit(n_jobs_cv=5)
-
- for level_idx, level in enumerate(hyperparam_dict["level"]):
- confint = dml_iivm.confint(level=level)
- coverage = (confint.iloc[0, 0] < theta) & (theta < confint.iloc[0, 1])
- ci_length = confint.iloc[0, 1] - confint.iloc[0, 0]
-
- df_results_detailed = pd.concat(
- (df_results_detailed,
- pd.DataFrame({
- "Coverage": coverage.astype(int),
- "CI Length": confint.iloc[0, 1] - confint.iloc[0, 0],
- "Bias": abs(dml_iivm.coef[0] - theta),
- "Learner g": learner_g_name,
- "Learner m": learner_m_name,
- "level": level,
- "repetition": i_rep}, index=[0])),
- ignore_index=True)
-
-df_results = df_results_detailed.groupby(
- ["Learner g", "Learner m", "level"]).agg(
- {"Coverage": "mean",
- "CI Length": "mean",
- "Bias": "mean",
- "repetition": "count"}
- ).reset_index()
-print(df_results)
-
-end_time = time.time()
-total_runtime = end_time - start_time
-
-# save results
-script_name = "iivm_late_coverage.py"
-path = "results/irm/iivm_late_coverage"
-
-metadata = pd.DataFrame({
- 'DoubleML Version': [dml.__version__],
- 'Script': [script_name],
- 'Date': [datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
- 'Total Runtime (seconds)': [total_runtime],
- 'Python Version': [f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"],
-})
-print(metadata)
-
-df_results.to_csv(f"{path}.csv", index=False)
-metadata.to_csv(f"{path}_metadata.csv", index=False)
diff --git a/scripts/irm/irm_apo_coverage.py b/scripts/irm/irm_apo_coverage.py
deleted file mode 100644
index 3f5f7ae..0000000
--- a/scripts/irm/irm_apo_coverage.py
+++ /dev/null
@@ -1,227 +0,0 @@
-import numpy as np
-import pandas as pd
-from datetime import datetime
-import time
-import sys
-
-from lightgbm import LGBMRegressor, LGBMClassifier
-from sklearn.linear_model import LinearRegression, LogisticRegression
-
-import doubleml as dml
-from doubleml.datasets import make_irm_data_discrete_treatments
-
-# Number of repetitions
-n_rep = 1000
-max_runtime = 5.5 * 3600 # 5.5 hours in seconds
-
-# DGP pars
-n_obs = 500
-n_levels = 2
-
-# generate the APOs true values
-data_apo_large = make_irm_data_discrete_treatments(n_obs=int(1e+6), n_levels=n_levels, linear=True)
-y0 = data_apo_large['oracle_values']['y0']
-ite = data_apo_large['oracle_values']['ite']
-d = data_apo_large['d']
-
-average_ites = np.full(n_levels + 1, np.nan)
-apos = np.full(n_levels + 1, np.nan)
-for i in range(n_levels + 1):
- average_ites[i] = np.mean(ite[d == i]) * (i > 0)
- apos[i] = np.mean(y0) + average_ites[i]
-
-ates = np.full(n_levels, np.nan)
-for i in range(n_levels):
- ates[i] = apos[i + 1] - apos[0]
-
-print(f"Levels and their counts:\n{np.unique(d, return_counts=True)}")
-print(f"True APOs: {apos}")
-print(f"True ATEs: {ates}")
-
-# to get the best possible comparison between different learners (and settings) we first simulate all datasets
-np.random.seed(42)
-datasets = []
-for i in range(n_rep):
- data_apo = make_irm_data_discrete_treatments(n_obs=n_obs, n_levels=n_levels, linear=True)
- df_apo = pd.DataFrame(
- np.column_stack((data_apo['y'], data_apo['d'], data_apo['x'])),
- columns=['y', 'd'] + ['x' + str(i) for i in range(data_apo['x'].shape[1])]
- )
- datasets.append(df_apo)
-
-# set up hyperparameters
-hyperparam_dict = {
- "learner_g":
- [("Linear", LinearRegression()),
- ("LGBM", LGBMRegressor(verbose=-1))],
- "learner_m":
- [("Logistic", LogisticRegression()),
- ("LGBM", LGBMClassifier(verbose=-1))],
- "treatment_levels": [0.0, 1.0, 2.0],
- "level": [0.95, 0.90],
- "trimming_threshold": 0.01
-}
-
-# set up the results dataframe
-df_results_detailed_apo = pd.DataFrame()
-df_results_detailed_apos = pd.DataFrame()
-df_results_detailed_apos_constrast = pd.DataFrame()
-
-# start simulation
-np.random.seed(42)
-start_time = time.time()
-
-for i_rep in range(n_rep):
- print(f"Repetition: {i_rep}/{n_rep}", end="\r")
-
- # Check the elapsed time
- elapsed_time = time.time() - start_time
- if elapsed_time > max_runtime:
- print("Maximum runtime exceeded. Stopping the simulation.")
- break
-
- # define the DoubleML data object
- obj_dml_data = dml.DoubleMLData(datasets[i_rep], 'y', 'd')
-
- for learner_g_idx, (learner_g_name, ml_g) in enumerate(hyperparam_dict["learner_g"]):
- for learner_m_idx, (learner_m_name, ml_m) in enumerate(hyperparam_dict["learner_m"]):
- for treatment_idx, treatment_level in enumerate(hyperparam_dict["treatment_levels"]):
- dml_apo = dml.DoubleMLAPO(
- obj_dml_data=obj_dml_data,
- ml_g=ml_g,
- ml_m=ml_m,
- treatment_level=treatment_level,
- trimming_threshold=hyperparam_dict["trimming_threshold"]
- )
- dml_apo.fit(n_jobs_cv=5)
-
- for level_idx, level in enumerate(hyperparam_dict["level"]):
- confint = dml_apo.confint(level=level)
- coverage = (confint.iloc[0, 0] < apos[treatment_idx]) & (apos[treatment_idx] < confint.iloc[0, 1])
- ci_length = confint.iloc[0, 1] - confint.iloc[0, 0]
-
- df_results_detailed_apo = pd.concat(
- (df_results_detailed_apo,
- pd.DataFrame({
- "Coverage": coverage.astype(int),
- "CI Length": confint.iloc[0, 1] - confint.iloc[0, 0],
- "Bias": abs(dml_apo.coef[0] - apos[treatment_idx]),
- "Treatment Level": treatment_level,
- "Learner g": learner_g_name,
- "Learner m": learner_m_name,
- "level": level,
- "repetition": i_rep}, index=[0])),
- ignore_index=True)
-
- # calculate the APOs
- dml_apos = dml.DoubleMLAPOS(
- obj_dml_data=obj_dml_data,
- ml_g=ml_g,
- ml_m=ml_m,
- treatment_levels=hyperparam_dict["treatment_levels"],
- trimming_threshold=hyperparam_dict["trimming_threshold"]
- )
- dml_apos.fit(n_jobs_cv=5)
- effects = dml_apos.coef
-
- causal_contrast_model = dml_apos.causal_contrast(reference_levels=0)
- est_ates = causal_contrast_model.thetas
-
- for level_idx, level in enumerate(hyperparam_dict["level"]):
- confint = dml_apos.confint(level=level)
- coverage = np.mean((confint.iloc[:, 0] < apos) & (apos < confint.iloc[:, 1]))
- ci_length = np.mean(confint.iloc[:, 1] - confint.iloc[:, 0])
-
- dml_apos.bootstrap(n_rep_boot=2000)
- confint_uniform = dml_apos.confint(level=level, joint=True)
- coverage_uniform = all((confint_uniform.iloc[:, 0] < apos) & (apos < confint_uniform.iloc[:, 1]))
- ci_length_uniform = np.mean(confint_uniform.iloc[:, 1] - confint_uniform.iloc[:, 0])
- df_results_detailed_apos = pd.concat(
- (df_results_detailed_apos,
- pd.DataFrame({
- "Coverage": coverage,
- "CI Length": ci_length,
- "Bias": np.mean(abs(effects - apos)),
- "Uniform Coverage": coverage_uniform,
- "Uniform CI Length": ci_length_uniform,
- "Learner g": learner_g_name,
- "Learner m": learner_m_name,
- "level": level,
- "repetition": i_rep}, index=[0])),
- ignore_index=True)
-
- # calculate the ATEs
- confint_contrast = causal_contrast_model.confint(level=level)
- coverage_contrast = np.mean((confint_contrast.iloc[:, 0] < ates) & (ates < confint_contrast.iloc[:, 1]))
- ci_length_contrast = np.mean(confint_contrast.iloc[:, 1] - confint_contrast.iloc[:, 0])
-
- causal_contrast_model.bootstrap(n_rep_boot=2000)
- confint_contrast_uniform = causal_contrast_model.confint(level=level, joint=True)
- coverage_contrast_uniform = all(
- (confint_contrast_uniform.iloc[:, 0] < ates) & (ates < confint_contrast_uniform.iloc[:, 1]))
- ci_length_contrast_uniform = np.mean(confint_contrast_uniform.iloc[:, 1] - confint_contrast_uniform.iloc[:, 0])
- df_results_detailed_apos_constrast = pd.concat(
- (df_results_detailed_apos_constrast,
- pd.DataFrame({
- "Coverage": coverage_contrast,
- "CI Length": ci_length_contrast,
- "Bias": np.mean(abs(est_ates - ates)),
- "Uniform Coverage": coverage_contrast_uniform,
- "Uniform CI Length": ci_length_contrast_uniform,
- "Learner g": learner_g_name,
- "Learner m": learner_m_name,
- "level": level,
- "repetition": i_rep}, index=[0])),
- ignore_index=True)
-
-df_results_apo = df_results_detailed_apo.groupby(
- ["Learner g", "Learner m", "Treatment Level", "level"]).agg(
- {"Coverage": "mean",
- "CI Length": "mean",
- "Bias": "mean",
- "repetition": "count"}
- ).reset_index()
-print(df_results_apo)
-
-df_results_apos = df_results_detailed_apos.groupby(
- ["Learner g", "Learner m", "level"]).agg(
- {"Coverage": "mean",
- "CI Length": "mean",
- "Bias": "mean",
- "Uniform Coverage": "mean",
- "Uniform CI Length": "mean",
- "repetition": "count"}
- ).reset_index()
-print(df_results_apos)
-
-df_results_apos_contrast = df_results_detailed_apos_constrast.groupby(
- ["Learner g", "Learner m", "level"]).agg(
- {"Coverage": "mean",
- "CI Length": "mean",
- "Bias": "mean",
- "Uniform Coverage": "mean",
- "Uniform CI Length": "mean",
- "repetition": "count"}
- ).reset_index()
-print(df_results_apos_contrast)
-
-end_time = time.time()
-total_runtime = end_time - start_time
-
-# save results
-script_name = "irm_apo_coverage.py"
-path = "results/irm/irm_apo_coverage"
-
-metadata = pd.DataFrame({
- 'DoubleML Version': [dml.__version__],
- 'Script': [script_name],
- 'Date': [datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
- 'Total Runtime (seconds)': [total_runtime],
- 'Python Version': [f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"],
-})
-print(metadata)
-
-df_results_apo.to_csv(f"{path}_apo.csv", index=False)
-df_results_apos.to_csv(f"{path}_apos.csv", index=False)
-df_results_apos_contrast.to_csv(f"{path}_apos_contrast.csv", index=False)
-metadata.to_csv(f"{path}_metadata.csv", index=False)
diff --git a/scripts/irm/irm_ate.py b/scripts/irm/irm_ate.py
new file mode 100644
index 0000000..7b127bd
--- /dev/null
+++ b/scripts/irm/irm_ate.py
@@ -0,0 +1,13 @@
+from montecover.irm import IRMATECoverageSimulation
+
+# Create and run simulation with config file
+sim = IRMATECoverageSimulation(
+ config_file="scripts/irm/irm_ate_config.yml",
+ log_level="INFO",
+ log_file="logs/irm/irm_ate_sim.log",
+)
+sim.run_simulation()
+sim.save_results(output_path="results/irm/", file_prefix="irm_ate")
+
+# Save config file for reproducibility
+sim.save_config("results/irm/irm_ate_config.yml")
diff --git a/scripts/irm/irm_ate_config.yml b/scripts/irm/irm_ate_config.yml
new file mode 100644
index 0000000..6a7a3f5
--- /dev/null
+++ b/scripts/irm/irm_ate_config.yml
@@ -0,0 +1,68 @@
+# Simulation parameters for IRM ATE Coverage
+
+simulation_parameters:
+ repetitions: 1000
+ max_runtime: 19800 # 5.5 hours in seconds
+ random_seed: 42
+ n_jobs: -2
+
+dgp_parameters:
+ theta: [0.5] # Treatment effect
+ n_obs: [500] # Sample size
+ dim_x: [20] # Number of covariates
+
+# Define reusable learner configurations
+learner_definitions:
+ lasso: &lasso
+ name: "LassoCV"
+
+ logit: &logit
+ name: "Logistic"
+
+ rfr: &rfr
+ name: "RF Regr."
+ params:
+ n_estimators: 200
+ max_features: 20
+ max_depth: 5
+ min_samples_leaf: 2
+
+ rfc: &rfc
+ name: "RF Clas."
+ params:
+ n_estimators: 200
+ max_features: 20
+ max_depth: 5
+ min_samples_leaf: 2
+
+ lgbmr: &lgbmr
+ name: "LGBM Regr."
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+
+ lgbmc: &lgbmc
+ name: "LGBM Clas."
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+
+dml_parameters:
+ learners:
+ - ml_g: *lasso
+ ml_m: *logit
+ - ml_g: *rfr
+ ml_m: *rfc
+ - ml_g: *lasso
+ ml_m: *rfc
+ - ml_g: *rfr
+ ml_m: *logit
+ - ml_g: *lgbmr
+ ml_m: *lgbmc
+ - ml_g: *lgbmr
+ ml_m: *logit
+ - ml_g: *lasso
+ ml_m: *lgbmc
+
+confidence_parameters:
+ level: [0.95, 0.90] # Confidence levels
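The learner_definitions block uses standard YAML anchors (&name) and aliases (*name): each learner is defined once and then referenced in the ml_g/ml_m pairs under dml_parameters. A small PyYAML check of how the aliases expand on load (illustrative only; montecover's own loader is not shown in this diff):

    import yaml

    snippet = """
    learner_definitions:
      lasso: &lasso
        name: "LassoCV"
    dml_parameters:
      learners:
        - ml_g: *lasso
          ml_m: *lasso
    """
    cfg = yaml.safe_load(snippet)
    # Each alias expands to the full anchored mapping,
    # and all references share one object.
    assert cfg["dml_parameters"]["learners"][0]["ml_g"] == {"name": "LassoCV"}
    assert cfg["dml_parameters"]["learners"][0]["ml_m"] is cfg["learner_definitions"]["lasso"]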
diff --git a/scripts/irm/irm_ate_coverage.py b/scripts/irm/irm_ate_coverage.py
deleted file mode 100644
index 82e7fa7..0000000
--- a/scripts/irm/irm_ate_coverage.py
+++ /dev/null
@@ -1,112 +0,0 @@
-import numpy as np
-import pandas as pd
-from datetime import datetime
-import time
-import sys
-
-from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
-from sklearn.linear_model import LassoCV, LogisticRegressionCV
-
-import doubleml as dml
-from doubleml.datasets import make_irm_data
-
-# Number of repetitions
-n_rep = 1000
-max_runtime = 5.5 * 3600 # 5.5 hours in seconds
-
-# DGP pars
-theta = 0.5
-n_obs = 500
-dim_x = 20
-
-# to get the best possible comparison between different learners (and settings) we first simulate all datasets
-np.random.seed(42)
-datasets = []
-for i in range(n_rep):
- data = make_irm_data(theta=theta, n_obs=n_obs, dim_x=dim_x, return_type='DataFrame')
- datasets.append(data)
-
-# set up hyperparameters
-hyperparam_dict = {
- "learner_g": [("Lasso", LassoCV()),
- ("Random Forest",
- RandomForestRegressor(n_estimators=100, max_features=20, max_depth=5, min_samples_leaf=2))],
- "learner_m": [("Logistic Regression", LogisticRegressionCV()),
- ("Random Forest",
- RandomForestClassifier(n_estimators=100, max_features=20, max_depth=5, min_samples_leaf=2))],
- "level": [0.95, 0.90]
-}
-
-# set up the results dataframe
-df_results_detailed = pd.DataFrame()
-
-# start simulation
-np.random.seed(42)
-start_time = time.time()
-
-for i_rep in range(n_rep):
- print(f"Repetition: {i_rep}/{n_rep}", end="\r")
-
- # Check the elapsed time
- elapsed_time = time.time() - start_time
- if elapsed_time > max_runtime:
- print("Maximum runtime exceeded. Stopping the simulation.")
- break
-
- # define the DoubleML data object
- obj_dml_data = dml.DoubleMLData(datasets[i_rep], 'y', 'd')
-
- for learner_g_idx, (learner_g_name, ml_g) in enumerate(hyperparam_dict["learner_g"]):
- for learner_m_idx, (learner_m_name, ml_m) in enumerate(hyperparam_dict["learner_m"]):
- # Set machine learning methods for g & m
- dml_irm = dml.DoubleMLIRM(
- obj_dml_data=obj_dml_data,
- ml_g=ml_g,
- ml_m=ml_m,
- )
- dml_irm.fit(n_jobs_cv=5)
-
- for level_idx, level in enumerate(hyperparam_dict["level"]):
- confint = dml_irm.confint(level=level)
- coverage = (confint.iloc[0, 0] < theta) & (theta < confint.iloc[0, 1])
- ci_length = confint.iloc[0, 1] - confint.iloc[0, 0]
-
- df_results_detailed = pd.concat(
- (df_results_detailed,
- pd.DataFrame({
- "Coverage": coverage.astype(int),
- "CI Length": confint.iloc[0, 1] - confint.iloc[0, 0],
- "Bias": abs(dml_irm.coef[0] - theta),
- "Learner g": learner_g_name,
- "Learner m": learner_m_name,
- "level": level,
- "repetition": i_rep}, index=[0])),
- ignore_index=True)
-
-df_results = df_results_detailed.groupby(
- ["Learner g", "Learner m", "level"]).agg(
- {"Coverage": "mean",
- "CI Length": "mean",
- "Bias": "mean",
- "repetition": "count"}
- ).reset_index()
-print(df_results)
-
-end_time = time.time()
-total_runtime = end_time - start_time
-
-# save results
-script_name = "irm_ate_coverage.py"
-path = "results/irm/irm_ate_coverage"
-
-metadata = pd.DataFrame({
- 'DoubleML Version': [dml.__version__],
- 'Script': [script_name],
- 'Date': [datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
- 'Total Runtime (seconds)': [total_runtime],
- 'Python Version': [f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"],
-})
-print(metadata)
-
-df_results.to_csv(f"{path}.csv", index=False)
-metadata.to_csv(f"{path}_metadata.csv", index=False)
diff --git a/scripts/irm/irm_ate_sensitivity.py b/scripts/irm/irm_ate_sensitivity.py
index d29fa77..d3651b6 100644
--- a/scripts/irm/irm_ate_sensitivity.py
+++ b/scripts/irm/irm_ate_sensitivity.py
@@ -1,159 +1,13 @@
-import numpy as np
-import pandas as pd
-from datetime import datetime
-import time
-import sys
-
-from sklearn.linear_model import LinearRegression, LogisticRegression
-from lightgbm import LGBMRegressor, LGBMClassifier
-
-import doubleml as dml
-from doubleml.datasets import make_confounded_irm_data
-
-# Number of repetitions
-n_rep = 500
-max_runtime = 5.5 * 3600 # 5.5 hours in seconds
-
-# DGP pars
-n_obs = 5000
-theta = 5.0
-trimming_threshold = 0.05
-
-dgp_pars = {
- "gamma_a": 0.198,
- "beta_a": 0.582,
- "theta": theta,
- "var_epsilon_y": 1.0,
- "trimming_threshold": trimming_threshold,
- "linear": False,
-}
-
-# test inputs
-np.random.seed(42)
-dgp_dict = make_confounded_irm_data(n_obs=int(1e+6), **dgp_pars)
-
-oracle_dict = dgp_dict['oracle_values']
-rho = oracle_dict['rho_ate']
-cf_y = oracle_dict['cf_y']
-cf_d = oracle_dict['cf_d_ate']
-
-print(f"Confounding factor for Y: {cf_y}")
-print(f"Confounding factor for D: {cf_d}")
-print(f"Rho: {rho}")
-
-# to get the best possible comparison between different learners (and settings) we first simulate all datasets
-np.random.seed(42)
-datasets = []
-for i in range(n_rep):
- dgp_dict = make_confounded_irm_data(n_obs=n_obs, **dgp_pars)
- datasets.append(dgp_dict)
-
-# set up hyperparameters
-hyperparam_dict = {
- "learner_g": [("Linear Reg.", LinearRegression()),
- ("LGBM", LGBMRegressor(n_estimators=500, learning_rate=0.01, min_child_samples=10, verbose=-1))],
- "learner_m": [("Logistic Regr.", LogisticRegression()),
- ("LGBM", LGBMClassifier(n_estimators=500, learning_rate=0.01, min_child_samples=10, verbose=-1)),],
- "level": [0.95, 0.90]
-}
-
-# set up the results dataframe
-df_results_detailed = pd.DataFrame()
-
-# start simulation
-np.random.seed(42)
-start_time = time.time()
-
-for i_rep in range(n_rep):
- print(f"Repetition: {i_rep}/{n_rep}", end="\r")
-
- # Check the elapsed time
- elapsed_time = time.time() - start_time
- if elapsed_time > max_runtime:
- print("Maximum runtime exceeded. Stopping the simulation.")
- break
-
- # define the DoubleML data object
- dgp_dict = datasets[i_rep]
-
- x_cols = [f'X{i + 1}' for i in np.arange(dgp_dict['x'].shape[1])]
- df = pd.DataFrame(np.column_stack((dgp_dict['x'], dgp_dict['y'], dgp_dict['d'])), columns=x_cols + ['y', 'd'])
- obj_dml_data = dml.DoubleMLData(df, 'y', 'd')
-
- for learner_g_idx, (learner_g_name, ml_g) in enumerate(hyperparam_dict["learner_g"]):
- for learner_m_idx, (learner_m_name, ml_m) in enumerate(hyperparam_dict["learner_m"]):
- # Set machine learning methods for g & m
- dml_irm = dml.DoubleMLIRM(
- obj_dml_data=obj_dml_data,
- ml_g=ml_g,
- ml_m=ml_m,
- trimming_threshold=trimming_threshold
- )
- dml_irm.fit(n_jobs_cv=5)
-
- for level_idx, level in enumerate(hyperparam_dict["level"]):
- estimate = dml_irm.coef[0]
- confint = dml_irm.confint(level=level)
- coverage = (confint.iloc[0, 0] < theta) & (theta < confint.iloc[0, 1])
- ci_length = confint.iloc[0, 1] - confint.iloc[0, 0]
-
- # test sensitivity parameters
- dml_irm.sensitivity_analysis(cf_y=cf_y, cf_d=cf_d, rho=rho, level=level, null_hypothesis=theta)
- cover_lower = theta >= dml_irm.sensitivity_params['ci']['lower']
- cover_upper = theta <= dml_irm.sensitivity_params['ci']['upper']
- rv = dml_irm.sensitivity_params['rv']
- rva = dml_irm.sensitivity_params['rva']
- bias_lower = abs(theta - dml_irm.sensitivity_params['theta']['lower'])
- bias_upper = abs(theta - dml_irm.sensitivity_params['theta']['upper'])
-
- df_results_detailed = pd.concat(
- (df_results_detailed,
- pd.DataFrame({
- "Coverage": coverage.astype(int),
- "CI Length": confint.iloc[0, 1] - confint.iloc[0, 0],
- "Bias": abs(estimate - theta),
- "Coverage (Lower)": cover_lower.astype(int),
- "Coverage (Upper)": cover_upper.astype(int),
- "RV": rv,
- "RVa": rva,
- "Bias (Lower)": bias_lower,
- "Bias (Upper)": bias_upper,
- "Learner g": learner_g_name,
- "Learner m": learner_m_name,
- "level": level,
- "repetition": i_rep}, index=[0])),
- ignore_index=True)
-
-df_results = df_results_detailed.groupby(
- ["Learner g", "Learner m", "level"]).agg(
- {"Coverage": "mean",
- "CI Length": "mean",
- "Bias": "mean",
- "Coverage (Lower)": "mean",
- "Coverage (Upper)": "mean",
- "RV": "mean",
- "RVa": "mean",
- "Bias (Lower)": "mean",
- "Bias (Upper)": "mean",
- "repetition": "count"}
- ).reset_index()
-print(df_results)
-
-end_time = time.time()
-total_runtime = end_time - start_time
-
-# save results
-script_name = "irm_ate_sensitivity.py"
-path = "results/irm/irm_ate_sensitivity"
-
-metadata = pd.DataFrame({
- 'DoubleML Version': [dml.__version__],
- 'Script': [script_name],
- 'Date': [datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
- 'Total Runtime (seconds)': [total_runtime],
- 'Python Version': [f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"],
-})
-print(metadata)
-
-df_results.to_csv(f"{path}.csv", index=False)
-metadata.to_csv(f"{path}_metadata.csv", index=False)
+from montecover.irm import IRMATESensitivityCoverageSimulation
+
+# Create and run simulation with config file
+sim = IRMATESensitivityCoverageSimulation(
+ config_file="scripts/irm/irm_ate_sensitivity_config.yml",
+ log_level="INFO",
+ log_file="logs/irm/irm_ate_sensitivity_sim.log",
+)
+sim.run_simulation()
+sim.save_results(output_path="results/irm/", file_prefix="irm_ate_sensitivity")
+
+# Save config file for reproducibility
+sim.save_config("results/irm/irm_ate_sensitivity_config.yml")
diff --git a/scripts/irm/irm_ate_sensitivity_config.yml b/scripts/irm/irm_ate_sensitivity_config.yml
new file mode 100644
index 0000000..c051ac0
--- /dev/null
+++ b/scripts/irm/irm_ate_sensitivity_config.yml
@@ -0,0 +1,54 @@
+# Simulation parameters for IRM ATE Sensitivity Coverage
+
+simulation_parameters:
+ repetitions: 500
+ max_runtime: 19800 # 5.5 hours in seconds
+ random_seed: 42
+ n_jobs: -2
+
+dgp_parameters:
+ theta: [5.0] # Treatment effect
+ n_obs: [5000] # Sample size
+ trimming_threshold: [0.05] # Trimming threshold
+ var_epsilon_y: [1.0] # Variance of outcome noise
+ linear: [False]
+ gamma_a: [0.198]
+ beta_a: [0.582]
+
+# Define reusable learner configurations
+learner_definitions:
+ linear: &linear
+ name: "Linear"
+
+ logit: &logit
+ name: "Logistic"
+
+ lgbmr: &lgbmr
+ name: "LGBM Regr."
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+ min_child_samples: 10
+
+ lgbmc: &lgbmc
+ name: "LGBM Clas."
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+ min_child_samples: 10
+
+dml_parameters:
+ learners:
+ - ml_g: *linear
+ ml_m: *logit
+ - ml_g: *lgbmr
+ ml_m: *lgbmc
+ - ml_g: *lgbmr
+ ml_m: *logit
+ - ml_g: *linear
+ ml_m: *lgbmc
+
+ trimming_threshold: [0.05]
+
+confidence_parameters:
+ level: [0.95, 0.90] # Confidence levels
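Every dgp_parameters and dml_parameters entry is list-valued even when it holds a single value, which suggests the configs describe parameter grids that are crossed into individual simulation settings. A sketch of that presumed expansion (an assumption about how montecover consumes these files, not something this diff confirms):

    import itertools

    dgp_parameters = {
        "theta": [5.0],
        "n_obs": [5000],
        "trimming_threshold": [0.05],
    }
    keys = list(dgp_parameters)
    settings = [
        dict(zip(keys, combo))
        for combo in itertools.product(*dgp_parameters.values())
    ]
    # With single-element lists this yields exactly one DGP setting:
    # [{'theta': 5.0, 'n_obs': 5000, 'trimming_threshold': 0.05}]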
diff --git a/scripts/irm/irm_atte.py b/scripts/irm/irm_atte.py
new file mode 100644
index 0000000..829ba0e
--- /dev/null
+++ b/scripts/irm/irm_atte.py
@@ -0,0 +1,13 @@
+from montecover.irm import IRMATTECoverageSimulation
+
+# Create and run simulation with config file
+sim = IRMATTECoverageSimulation(
+ config_file="scripts/irm/irm_atte_config.yml",
+ log_level="INFO",
+ log_file="logs/irm/irm_atte_sim.log",
+)
+sim.run_simulation()
+sim.save_results(output_path="results/irm/", file_prefix="irm_atte")
+
+# Save config file for reproducibility
+sim.save_config("results/irm/irm_atte_config.yml")
diff --git a/scripts/irm/irm_atte_config.yml b/scripts/irm/irm_atte_config.yml
new file mode 100644
index 0000000..2a3ba63
--- /dev/null
+++ b/scripts/irm/irm_atte_config.yml
@@ -0,0 +1,68 @@
+# Simulation parameters for IRM ATTE Coverage
+
+simulation_parameters:
+ repetitions: 1000
+ max_runtime: 19800 # 5.5 hours in seconds
+ random_seed: 42
+ n_jobs: -2
+
+dgp_parameters:
+ theta: [0.5] # Treatment effect
+ n_obs: [500] # Sample size
+ dim_x: [20] # Number of covariates
+
+# Define reusable learner configurations
+learner_definitions:
+ lasso: &lasso
+ name: "LassoCV"
+
+ logit: &logit
+ name: "Logistic"
+
+ rfr: &rfr
+ name: "RF Regr."
+ params:
+ n_estimators: 200
+ max_features: 20
+ max_depth: 20
+ min_samples_leaf: 2
+
+ rfc: &rfc
+ name: "RF Clas."
+ params:
+ n_estimators: 200
+ max_features: 20
+ max_depth: 20
+ min_samples_leaf: 20
+
+ lgbmr: &lgbmr
+ name: "LGBM Regr."
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+
+ lgbmc: &lgbmc
+ name: "LGBM Clas."
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+
+dml_parameters:
+ learners:
+ - ml_g: *lasso
+ ml_m: *logit
+ - ml_g: *rfr
+ ml_m: *rfc
+ - ml_g: *lasso
+ ml_m: *rfc
+ - ml_g: *rfr
+ ml_m: *logit
+ - ml_g: *lgbmr
+ ml_m: *lgbmc
+ - ml_g: *lgbmr
+ ml_m: *logit
+ - ml_g: *lasso
+ ml_m: *lgbmc
+
+confidence_parameters:
+ level: [0.95, 0.90] # Confidence levels
diff --git a/scripts/irm/irm_atte_coverage.py b/scripts/irm/irm_atte_coverage.py
deleted file mode 100644
index 8b84743..0000000
--- a/scripts/irm/irm_atte_coverage.py
+++ /dev/null
@@ -1,141 +0,0 @@
-import numpy as np
-import pandas as pd
-from datetime import datetime
-import time
-import sys
-
-from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
-from sklearn.linear_model import LassoCV, LogisticRegressionCV
-
-import doubleml as dml
-from doubleml.datasets import make_irm_data
-from scipy.linalg import toeplitz
-
-# Number of repetitions
-n_rep = 1000
-max_runtime = 5.5 * 3600 # 5.5 hours in seconds
-
-# DGP pars
-theta = 0.5
-n_obs = 500
-dim_x = 20
-
-# We can simulate the ATTE from the function via MC-samples
-n_obs_atte = 50000
-
-# manual make irm data with default params
-R2_d = 0.5
-R2_y = 0.5
-
-v = np.random.uniform(size=[n_obs_atte, ])
-zeta = np.random.standard_normal(size=[n_obs_atte, ])
-
-cov_mat = toeplitz([np.power(0.5, k) for k in range(dim_x)])
-x = np.random.multivariate_normal(np.zeros(dim_x), cov_mat, size=[n_obs_atte, ])
-
-beta = [1 / (k**2) for k in range(1, dim_x + 1)]
-b_sigma_b = np.dot(np.dot(cov_mat, beta), beta)
-c_y = np.sqrt(R2_y/((1-R2_y) * b_sigma_b))
-c_d = np.sqrt(np.pi**2 / 3. * R2_d/((1-R2_d) * b_sigma_b))
-
-xx = np.exp(np.dot(x, np.multiply(beta, c_d)))
-d = 1. * ((xx/(1+xx)) > v)
-
-y = d * theta + d * np.dot(x, np.multiply(beta, c_y)) + zeta
-y0 = zeta
-y1 = theta + np.dot(x, np.multiply(beta, c_y)) + zeta
-
-ATTE = np.mean(y1[d == 1] - y0[d == 1])
-print(ATTE)
-
-# to get the best possible comparison between different learners (and settings) we first simulate all datasets
-np.random.seed(42)
-datasets = []
-for i in range(n_rep):
- data = make_irm_data(theta=theta, n_obs=n_obs, dim_x=dim_x, return_type='DataFrame')
- datasets.append(data)
-
-# set up hyperparameters
-hyperparam_dict = {
- "learner_g": [("Lasso", LassoCV()),
- ("Random Forest",
- RandomForestRegressor(n_estimators=100, max_features=20, max_depth=5, min_samples_leaf=2))],
- "learner_m": [("Logistic Regression", LogisticRegressionCV()),
- ("Random Forest",
- RandomForestClassifier(n_estimators=100, max_features=20, max_depth=5, min_samples_leaf=2))],
- "level": [0.95, 0.90]
-}
-
-# set up the results dataframe
-df_results_detailed = pd.DataFrame()
-
-# start simulation
-np.random.seed(42)
-start_time = time.time()
-
-for i_rep in range(n_rep):
- print(f"Repetition: {i_rep}/{n_rep}", end="\r")
-
- # Check the elapsed time
- elapsed_time = time.time() - start_time
- if elapsed_time > max_runtime:
- print("Maximum runtime exceeded. Stopping the simulation.")
- break
-
- # define the DoubleML data object
- obj_dml_data = dml.DoubleMLData(datasets[i_rep], 'y', 'd')
-
- for learner_g_idx, (learner_g_name, ml_g) in enumerate(hyperparam_dict["learner_g"]):
- for learner_m_idx, (learner_m_name, ml_m) in enumerate(hyperparam_dict["learner_m"]):
- # Set machine learning methods for g & m
- dml_irm = dml.DoubleMLIRM(
- obj_dml_data=obj_dml_data,
- ml_g=ml_g,
- ml_m=ml_m,
- score="ATTE",
- )
- dml_irm.fit(n_jobs_cv=5)
-
- for level_idx, level in enumerate(hyperparam_dict["level"]):
- confint = dml_irm.confint(level=level)
- coverage = (confint.iloc[0, 0] < ATTE) & (ATTE < confint.iloc[0, 1])
- ci_length = confint.iloc[0, 1] - confint.iloc[0, 0]
-
- df_results_detailed = pd.concat(
- (df_results_detailed,
- pd.DataFrame({
- "Coverage": coverage.astype(int),
- "CI Length": confint.iloc[0, 1] - confint.iloc[0, 0],
- "Bias": abs(dml_irm.coef[0] - ATTE),
- "Learner g": learner_g_name,
- "Learner m": learner_m_name,
- "level": level,
- "repetition": i_rep}, index=[0])),
- ignore_index=True)
-
-df_results = df_results_detailed.groupby(
- ["Learner g", "Learner m", "level"]).agg(
- {"Coverage": "mean",
- "CI Length": "mean",
- "Bias": "mean",
- "repetition": "count"}
- ).reset_index()
-print(df_results)
-end_time = time.time()
-total_runtime = end_time - start_time
-
-# save results
-script_name = "irm_atte_coverage.py"
-path = "results/irm/irm_atte_coverage"
-
-metadata = pd.DataFrame({
- 'DoubleML Version': [dml.__version__],
- 'Script': [script_name],
- 'Date': [datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
- 'Total Runtime (seconds)': [total_runtime],
- 'Python Version': [f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"],
-})
-print(metadata)
-
-df_results.to_csv(f"{path}.csv", index=False)
-metadata.to_csv(f"{path}_metadata.csv", index=False)
diff --git a/scripts/irm/irm_atte_sensitivity.py b/scripts/irm/irm_atte_sensitivity.py
index aeda6b1..f28d9c9 100644
--- a/scripts/irm/irm_atte_sensitivity.py
+++ b/scripts/irm/irm_atte_sensitivity.py
@@ -1,159 +1,13 @@
-import numpy as np
-import pandas as pd
-from datetime import datetime
-import time
-import sys
-
-from sklearn.linear_model import LinearRegression, LogisticRegression
-from lightgbm import LGBMRegressor, LGBMClassifier
-
-import doubleml as dml
-from doubleml.datasets import make_confounded_irm_data
-
-# Number of repetitions
-n_rep = 500
-max_runtime = 5.5 * 3600 # 5.5 hours in seconds
-
-# DGP pars
-n_obs = 5000
-theta = 5.0
-trimming_threshold = 0.05
-
-dgp_pars = {
- "gamma_a": 0.151,
- "beta_a": 0.580,
- "theta": theta,
- "var_epsilon_y": 1.0,
- "trimming_threshold": trimming_threshold,
- "linear": False,
-}
-
-# test inputs
-np.random.seed(42)
-dgp_dict = make_confounded_irm_data(n_obs=int(1e+6), **dgp_pars)
-
-oracle_dict = dgp_dict['oracle_values']
-rho = oracle_dict['rho_atte']
-cf_y = oracle_dict['cf_y']
-cf_d = oracle_dict['cf_d_atte']
-
-print(f"Confounding factor for Y: {cf_y}")
-print(f"Confounding factor for D: {cf_d}")
-print(f"Rho: {rho}")
-
-# to get the best possible comparison between different learners (and settings) we first simulate all datasets
-np.random.seed(42)
-datasets = []
-for i in range(n_rep):
- dgp_dict = make_confounded_irm_data(n_obs=n_obs, **dgp_pars)
- datasets.append(dgp_dict)
-
-# set up hyperparameters
-hyperparam_dict = {
- "learner_g": [("Linear Reg.", LinearRegression()),
- ("LGBM", LGBMRegressor(n_estimators=500, learning_rate=0.01, min_child_samples=10, verbose=-1))],
- "learner_m": [("Logistic Regr.", LogisticRegression()),
- ("LGBM", LGBMClassifier(n_estimators=500, learning_rate=0.01, min_child_samples=10, verbose=-1)),],
- "level": [0.95, 0.90]
-}
-
-# set up the results dataframe
-df_results_detailed = pd.DataFrame()
-
-# start simulation
-np.random.seed(42)
-start_time = time.time()
-
-for i_rep in range(n_rep):
- print(f"Repetition: {i_rep}/{n_rep}", end="\r")
-
- # Check the elapsed time
- elapsed_time = time.time() - start_time
- if elapsed_time > max_runtime:
- print("Maximum runtime exceeded. Stopping the simulation.")
- break
-
- # define the DoubleML data object
- dgp_dict = datasets[i_rep]
-
- x_cols = [f'X{i + 1}' for i in np.arange(dgp_dict['x'].shape[1])]
- df = pd.DataFrame(np.column_stack((dgp_dict['x'], dgp_dict['y'], dgp_dict['d'])), columns=x_cols + ['y', 'd'])
- obj_dml_data = dml.DoubleMLData(df, 'y', 'd')
-
- for learner_g_idx, (learner_g_name, ml_g) in enumerate(hyperparam_dict["learner_g"]):
- for learner_m_idx, (learner_m_name, ml_m) in enumerate(hyperparam_dict["learner_m"]):
- # Set machine learning methods for g & m
- dml_irm = dml.DoubleMLIRM(
- obj_dml_data=obj_dml_data,
- score='ATTE',
- ml_g=ml_g,
- ml_m=ml_m,
- trimming_threshold=trimming_threshold
- )
- dml_irm.fit(n_jobs_cv=5)
-
- for level_idx, level in enumerate(hyperparam_dict["level"]):
- estimate = dml_irm.coef[0]
- confint = dml_irm.confint(level=level)
- coverage = (confint.iloc[0, 0] < theta) & (theta < confint.iloc[0, 1])
- ci_length = confint.iloc[0, 1] - confint.iloc[0, 0]
-
- # test sensitivity parameters
- dml_irm.sensitivity_analysis(cf_y=cf_y, cf_d=cf_d, rho=rho, level=level, null_hypothesis=theta)
- cover_lower = theta >= dml_irm.sensitivity_params['ci']['lower']
- cover_upper = theta <= dml_irm.sensitivity_params['ci']['upper']
- rv = dml_irm.sensitivity_params['rv']
- rva = dml_irm.sensitivity_params['rva']
- bias_lower = abs(theta - dml_irm.sensitivity_params['theta']['lower'])
- bias_upper = abs(theta - dml_irm.sensitivity_params['theta']['upper'])
-
- df_results_detailed = pd.concat(
- (df_results_detailed,
- pd.DataFrame({
- "Coverage": coverage.astype(int),
- "CI Length": confint.iloc[0, 1] - confint.iloc[0, 0],
- "Bias": abs(estimate - theta),
- "Coverage (Lower)": cover_lower.astype(int),
- "Coverage (Upper)": cover_upper.astype(int),
- "RV": rv,
- "RVa": rva,
- "Bias (Lower)": bias_lower,
- "Bias (Upper)": bias_upper,
- "Learner g": learner_g_name,
- "Learner m": learner_m_name,
- "level": level,
- "repetition": i_rep}, index=[0])),
- ignore_index=True)
-
-df_results = df_results_detailed.groupby(
- ["Learner g", "Learner m", "level"]).agg(
- {"Coverage": "mean",
- "CI Length": "mean",
- "Bias": "mean",
- "Coverage (Lower)": "mean",
- "Coverage (Upper)": "mean",
- "RV": "mean",
- "RVa": "mean",
- "Bias (Lower)": "mean",
- "Bias (Upper)": "mean",
- "repetition": "count"}
- ).reset_index()
-print(df_results)
-end_time = time.time()
-total_runtime = end_time - start_time
-
-# save results
-script_name = "irm_atte_sensitivity.py"
-path = "results/irm/irm_atte_sensitivity"
-
-metadata = pd.DataFrame({
- 'DoubleML Version': [dml.__version__],
- 'Script': [script_name],
- 'Date': [datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
- 'Total Runtime (seconds)': [total_runtime],
- 'Python Version': [f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"],
-})
-print(metadata)
-
-df_results.to_csv(f"{path}.csv", index=False)
-metadata.to_csv(f"{path}_metadata.csv", index=False)
+from montecover.irm import IRMATTESensitivityCoverageSimulation
+
+# Create and run simulation with config file
+sim = IRMATTESensitivityCoverageSimulation(
+ config_file="scripts/irm/irm_atte_sensitivity_config.yml",
+ log_level="INFO",
+ log_file="logs/irm/irm_atte_sensitivity_sim.log",
+)
+sim.run_simulation()
+sim.save_results(output_path="results/irm/", file_prefix="irm_atte_sensitivity")
+
+# Save config file for reproducibility
+sim.save_config("results/irm/irm_atte_sensitivity_config.yml")
diff --git a/scripts/irm/irm_atte_sensitivity_config.yml b/scripts/irm/irm_atte_sensitivity_config.yml
new file mode 100644
index 0000000..e6df9c4
--- /dev/null
+++ b/scripts/irm/irm_atte_sensitivity_config.yml
@@ -0,0 +1,54 @@
+# Simulation parameters for IRM ATTE Sensitivity Coverage
+
+simulation_parameters:
+ repetitions: 500
+ max_runtime: 19800 # 5.5 hours in seconds
+ random_seed: 42
+ n_jobs: -2
+
+dgp_parameters:
+ theta: [5.0] # Treatment effect
+ n_obs: [5000] # Sample size
+ trimming_threshold: [0.05] # Trimming threshold
+ var_epsilon_y: [1.0] # Variance of outcome noise
+ linear: [False]
+ gamma_a: [0.151]
+  beta_a: [0.580]
+
+# Define reusable learner configurations
+learner_definitions:
+ linear: &linear
+ name: "Linear"
+
+ logit: &logit
+ name: "Logistic"
+
+ lgbmr: &lgbmr
+ name: "LGBM Regr."
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+ min_child_samples: 10
+
+ lgbmc: &lgbmc
+ name: "LGBM Clas."
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+ min_child_samples: 10
+
+dml_parameters:
+ learners:
+ - ml_g: *linear
+ ml_m: *logit
+ - ml_g: *lgbmr
+ ml_m: *lgbmc
+ - ml_g: *lgbmr
+ ml_m: *logit
+ - ml_g: *linear
+ ml_m: *lgbmc
+
+ trimming_threshold: [0.05]
+
+confidence_parameters:
+ level: [0.95, 0.90] # Confidence levels
diff --git a/scripts/irm/irm_cate.py b/scripts/irm/irm_cate.py
new file mode 100644
index 0000000..6d265b3
--- /dev/null
+++ b/scripts/irm/irm_cate.py
@@ -0,0 +1,13 @@
+from montecover.irm import IRMCATECoverageSimulation
+
+# Create and run simulation with config file
+sim = IRMCATECoverageSimulation(
+ config_file="scripts/irm/irm_cate_config.yml",
+ log_level="INFO",
+ log_file="logs/irm/irm_cate_sim.log",
+)
+sim.run_simulation()
+sim.save_results(output_path="results/irm/", file_prefix="irm_cate")
+
+# Save config file for reproducibility
+sim.save_config("results/irm/irm_cate_config.yml")
diff --git a/scripts/irm/irm_cate_config.yml b/scripts/irm/irm_cate_config.yml
new file mode 100644
index 0000000..c09f225
--- /dev/null
+++ b/scripts/irm/irm_cate_config.yml
@@ -0,0 +1,69 @@
+# Simulation parameters for IRM CATE Coverage
+
+simulation_parameters:
+ repetitions: 1000
+ max_runtime: 19800 # 5.5 hours in seconds
+ random_seed: 42
+ n_jobs: -2
+
+dgp_parameters:
+ n_obs: [500] # Sample size
+ p: [10] # Number of covariates
+ support_size: [5] # Number of non-zero coefficients
+ n_x: [1]
+
+# Define reusable learner configurations
+learner_definitions:
+ linear: &linear
+ name: "Linear"
+
+ logit: &logit
+ name: "Logistic"
+
+ rfr: &rfr
+ name: "RF Regr."
+ params:
+ n_estimators: 200
+ max_features: 20
+ max_depth: 5
+ min_samples_leaf: 2
+
+ rfc: &rfc
+ name: "RF Clas."
+ params:
+ n_estimators: 200
+ max_features: 20
+ max_depth: 5
+ min_samples_leaf: 2
+
+ lgbmr: &lgbmr
+ name: "LGBM Regr."
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+
+ lgbmc: &lgbmc
+ name: "LGBM Clas."
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+
+dml_parameters:
+ learners:
+ - ml_g: *linear
+ ml_m: *logit
+ - ml_g: *rfr
+ ml_m: *rfc
+ - ml_g: *linear
+ ml_m: *rfc
+ - ml_g: *rfr
+ ml_m: *logit
+ - ml_g: *lgbmr
+ ml_m: *lgbmc
+ - ml_g: *lgbmr
+ ml_m: *logit
+ - ml_g: *linear
+ ml_m: *lgbmc
+
+confidence_parameters:
+ level: [0.95, 0.90] # Confidence levels
diff --git a/scripts/irm/irm_cate_coverage.py b/scripts/irm/irm_cate_coverage.py
deleted file mode 100644
index 9843de1..0000000
--- a/scripts/irm/irm_cate_coverage.py
+++ /dev/null
@@ -1,127 +0,0 @@
-import numpy as np
-import pandas as pd
-from datetime import datetime
-import time
-import sys
-import patsy
-
-from lightgbm import LGBMRegressor, LGBMClassifier
-from sklearn.linear_model import LassoCV, LogisticRegressionCV
-
-import doubleml as dml
-from doubleml.datasets import make_heterogeneous_data
-
-# Number of repetitions
-n_rep = 1000
-max_runtime = 5.5 * 3600 # 5.5 hours in seconds
-
-# DGP pars
-n_obs = 2000
-p = 10
-support_size = 5
-n_x = 1
-
-# to get the best possible comparison between different learners (and settings) we first simulate all datasets
-np.random.seed(42)
-datasets = []
-for i in range(n_rep):
- data = make_heterogeneous_data(n_obs=n_obs, p=p, support_size=support_size, n_x=n_x, binary_treatment=True)
- datasets.append(data)
-
-# set up hyperparameters
-hyperparam_dict = {
- "learner_g": [("Lasso", LassoCV()),
- ("LGBM", LGBMRegressor(n_estimators=200, learning_rate=0.05, verbose=-1))],
- "learner_m": [("Logistic Regression", LogisticRegressionCV()),
- ("LGBM", LGBMClassifier(n_estimators=200, learning_rate=0.05, verbose=-1))],
- "level": [0.95, 0.90]
-}
-
-# set up the results dataframe
-df_results_detailed = pd.DataFrame()
-
-# start simulation
-np.random.seed(42)
-start_time = time.time()
-
-for i_rep in range(n_rep):
- print(f"Repetition: {i_rep}/{n_rep}", end="\r")
-
- # Check the elapsed time
- elapsed_time = time.time() - start_time
- if elapsed_time > max_runtime:
- print("Maximum runtime exceeded. Stopping the simulation.")
- break
-
- # define the DoubleML data object
- data = datasets[i_rep]['data']
- design_matrix = patsy.dmatrix("bs(x, df=5, degree=2)", {"x": data["X_0"]})
- spline_basis = pd.DataFrame(design_matrix)
-
- true_effects = datasets[i_rep]['effects']
-
- obj_dml_data = dml.DoubleMLData(data, 'y', 'd')
-
- for learner_g_idx, (learner_g_name, ml_g) in enumerate(hyperparam_dict["learner_g"]):
- for learner_m_idx, (learner_m_name, ml_m) in enumerate(hyperparam_dict["learner_m"]):
- # Set machine learning methods for g & m
- dml_irm = dml.DoubleMLIRM(
- obj_dml_data=obj_dml_data,
- ml_g=ml_g,
- ml_m=ml_m,
- )
- dml_irm.fit(n_jobs_cv=5)
- cate = dml_irm.cate(spline_basis)
-
- for level_idx, level in enumerate(hyperparam_dict["level"]):
- confint = cate.confint(basis=spline_basis, level=level)
- effects = confint["effect"]
- coverage = (confint.iloc[:, 0] < true_effects) & (true_effects < confint.iloc[:, 2])
- ci_length = confint.iloc[:, 2] - confint.iloc[:, 0]
- confint_uniform = cate.confint(basis=spline_basis, level=0.95, joint=True, n_rep_boot=2000)
- coverage_uniform = all((confint_uniform.iloc[:, 0] < true_effects) &
- (true_effects < confint_uniform.iloc[:, 2]))
- ci_length_uniform = confint_uniform.iloc[:, 2] - confint_uniform.iloc[:, 0]
- df_results_detailed = pd.concat(
- (df_results_detailed,
- pd.DataFrame({
- "Coverage": coverage.mean(),
- "CI Length": ci_length.mean(),
- "Bias": abs(effects - true_effects).mean(),
- "Uniform Coverage": coverage_uniform,
- "Uniform CI Length": ci_length_uniform.mean(),
- "Learner g": learner_g_name,
- "Learner m": learner_m_name,
- "level": level,
- "repetition": i_rep}, index=[0])),
- ignore_index=True)
-
-df_results = df_results_detailed.groupby(
- ["Learner g", "Learner m", "level"]).agg(
- {"Coverage": "mean",
- "CI Length": "mean",
- "Bias": "mean",
- "Uniform Coverage": "mean",
- "Uniform CI Length": "mean",
- "repetition": "count"}
- ).reset_index()
-print(df_results)
-
-end_time = time.time()
-total_runtime = end_time - start_time
-
-# save results
-script_name = "irm_cate_coverage.py"
-path = "results/irm/irm_cate_coverage"
-
-metadata = pd.DataFrame({
- 'DoubleML Version': [dml.__version__],
- 'Script': [script_name],
- 'Date': [datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
- 'Total Runtime (seconds)': [total_runtime],
- 'Python Version': [f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"],
-})
-print(metadata)
-
-df_results.to_csv(f"{path}.csv", index=False)
-metadata.to_csv(f"{path}_metadata.csv", index=False)
diff --git a/scripts/irm/irm_gate.py b/scripts/irm/irm_gate.py
new file mode 100644
index 0000000..97fc0f3
--- /dev/null
+++ b/scripts/irm/irm_gate.py
@@ -0,0 +1,13 @@
+from montecover.irm import IRMGATECoverageSimulation
+
+# Create and run simulation with config file
+sim = IRMGATECoverageSimulation(
+ config_file="scripts/irm/irm_gate_config.yml",
+ log_level="INFO",
+ log_file="logs/irm/irm_gate_sim.log",
+)
+sim.run_simulation()
+sim.save_results(output_path="results/irm/", file_prefix="irm_gate")
+
+# Save config file for reproducibility
+sim.save_config("results/irm/irm_gate_config.yml")
diff --git a/scripts/irm/irm_gate_config.yml b/scripts/irm/irm_gate_config.yml
new file mode 100644
index 0000000..3143ef1
--- /dev/null
+++ b/scripts/irm/irm_gate_config.yml
@@ -0,0 +1,69 @@
+# Simulation parameters for IRM GATE Coverage
+
+simulation_parameters:
+ repetitions: 1000
+ max_runtime: 19800 # 5.5 hours in seconds
+ random_seed: 42
+ n_jobs: -2
+
+dgp_parameters:
+ n_obs: [500] # Sample size
+ p: [10] # Number of covariates
+ support_size: [5] # Number of non-zero coefficients
+ n_x: [1]
+
+# Define reusable learner configurations
+learner_definitions:
+ linear: &linear
+ name: "Linear"
+
+ logit: &logit
+ name: "Logistic"
+
+ rfr: &rfr
+ name: "RF Regr."
+ params:
+ n_estimators: 200
+ max_features: 20
+ max_depth: 5
+ min_samples_leaf: 2
+
+ rfc: &rfc
+ name: "RF Clas."
+ params:
+ n_estimators: 200
+ max_features: 20
+ max_depth: 5
+ min_samples_leaf: 2
+
+ lgbmr: &lgbmr
+ name: "LGBM Regr."
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+
+ lgbmc: &lgbmc
+ name: "LGBM Clas."
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+
+dml_parameters:
+ learners:
+ - ml_g: *linear
+ ml_m: *logit
+ - ml_g: *rfr
+ ml_m: *rfc
+ - ml_g: *linear
+ ml_m: *rfc
+ - ml_g: *rfr
+ ml_m: *logit
+ - ml_g: *lgbmr
+ ml_m: *lgbmc
+ - ml_g: *lgbmr
+ ml_m: *logit
+ - ml_g: *linear
+ ml_m: *lgbmc
+
+confidence_parameters:
+ level: [0.95, 0.90] # Confidence levels
diff --git a/scripts/irm/irm_gate_coverage.py b/scripts/irm/irm_gate_coverage.py
deleted file mode 100644
index aafd915..0000000
--- a/scripts/irm/irm_gate_coverage.py
+++ /dev/null
@@ -1,130 +0,0 @@
-import numpy as np
-import pandas as pd
-from datetime import datetime
-import time
-import sys
-
-from lightgbm import LGBMRegressor, LGBMClassifier
-from sklearn.linear_model import LassoCV, LogisticRegressionCV
-
-import doubleml as dml
-from doubleml.datasets import make_heterogeneous_data
-
-# Number of repetitions
-n_rep = 1000
-max_runtime = 5.5 * 3600 # 5.5 hours in seconds
-
-# DGP pars
-n_obs = 500
-p = 10
-support_size = 5
-n_x = 1
-
-# to get the best possible comparison between different learners (and settings) we first simulate all datasets
-np.random.seed(42)
-datasets = []
-for i in range(n_rep):
- data = make_heterogeneous_data(n_obs=n_obs, p=p, support_size=support_size, n_x=n_x, binary_treatment=True)
- datasets.append(data)
-
-# set up hyperparameters
-hyperparam_dict = {
- "learner_g": [("Lasso", LassoCV()),
- ("LGBM", LGBMRegressor(n_estimators=200, learning_rate=0.05, verbose=-1))],
- "learner_m": [("Logistic Regression", LogisticRegressionCV()),
- ("LGBM", LGBMClassifier(n_estimators=200, learning_rate=0.05, verbose=-1))],
- "level": [0.95, 0.90]
-}
-
-# set up the results dataframe
-df_results_detailed = pd.DataFrame()
-
-# start simulation
-np.random.seed(42)
-start_time = time.time()
-
-for i_rep in range(n_rep):
- print(f"Repetition: {i_rep}/{n_rep}", end="\r")
-
- # Check the elapsed time
- elapsed_time = time.time() - start_time
- if elapsed_time > max_runtime:
- print("Maximum runtime exceeded. Stopping the simulation.")
- break
-
- # define the DoubleML data object
- data = datasets[i_rep]['data']
- ite = datasets[i_rep]['effects']
-
- groups = pd.DataFrame(
- np.column_stack((data['X_0'] <= 0.3,
- (data['X_0'] > 0.3) & (data['X_0'] <= 0.7),
- data['X_0'] > 0.7)),
- columns=['Group 1', 'Group 2', 'Group 3'])
- true_effects = [ite[groups[group]].mean() for group in groups.columns]
-
- obj_dml_data = dml.DoubleMLData(data, 'y', 'd')
-
- for learner_g_idx, (learner_g_name, ml_g) in enumerate(hyperparam_dict["learner_g"]):
- for learner_m_idx, (learner_m_name, ml_m) in enumerate(hyperparam_dict["learner_m"]):
- # Set machine learning methods for g & m
- dml_irm = dml.DoubleMLIRM(
- obj_dml_data=obj_dml_data,
- ml_g=ml_g,
- ml_m=ml_m,
- )
- dml_irm.fit(n_jobs_cv=5)
- gate = dml_irm.gate(groups=groups)
-
- for level_idx, level in enumerate(hyperparam_dict["level"]):
- confint = gate.confint(level=level)
- effects = confint["effect"]
- coverage = (confint.iloc[:, 0] < true_effects) & (true_effects < confint.iloc[:, 2])
- ci_length = confint.iloc[:, 2] - confint.iloc[:, 0]
- confint_uniform = gate.confint(level=0.95, joint=True, n_rep_boot=2000)
- coverage_uniform = all((confint_uniform.iloc[:, 0] < true_effects) &
- (true_effects < confint_uniform.iloc[:, 2]))
- ci_length_uniform = confint_uniform.iloc[:, 2] - confint_uniform.iloc[:, 0]
- df_results_detailed = pd.concat(
- (df_results_detailed,
- pd.DataFrame({
- "Coverage": coverage.mean(),
- "CI Length": ci_length.mean(),
- "Bias": abs(effects - true_effects).mean(),
- "Uniform Coverage": coverage_uniform,
- "Uniform CI Length": ci_length_uniform.mean(),
- "Learner g": learner_g_name,
- "Learner m": learner_m_name,
- "level": level,
- "repetition": i_rep}, index=[0])),
- ignore_index=True)
-
-df_results = df_results_detailed.groupby(
- ["Learner g", "Learner m", "level"]).agg(
- {"Coverage": "mean",
- "CI Length": "mean",
- "Bias": "mean",
- "Uniform Coverage": "mean",
- "Uniform CI Length": "mean",
- "repetition": "count"}
- ).reset_index()
-print(df_results)
-
-end_time = time.time()
-total_runtime = end_time - start_time
-
-# save results
-script_name = "irm_gate_coverage.py"
-path = "results/irm/irm_gate_coverage"
-
-metadata = pd.DataFrame({
- 'DoubleML Version': [dml.__version__],
- 'Script': [script_name],
- 'Date': [datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
- 'Total Runtime (seconds)': [total_runtime],
- 'Python Version': [f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"],
-})
-print(metadata)
-
-df_results.to_csv(f"{path}.csv", index=False)
-metadata.to_csv(f"{path}_metadata.csv", index=False)
diff --git a/scripts/irm/lpq.py b/scripts/irm/lpq.py
new file mode 100644
index 0000000..220aeab
--- /dev/null
+++ b/scripts/irm/lpq.py
@@ -0,0 +1,13 @@
+from montecover.irm import LPQCoverageSimulation
+
+# Create and run simulation with config file
+sim = LPQCoverageSimulation(
+ config_file="scripts/irm/lpq_config.yml",
+ log_level="INFO",
+ log_file="logs/irm/lpq_sim.log",
+)
+sim.run_simulation()
+sim.save_results(output_path="results/irm/", file_prefix="lpq")
+
+# Save config file for reproducibility
+sim.save_config("results/irm/lpq_config.yml")
diff --git a/scripts/irm/lpq_config.yml b/scripts/irm/lpq_config.yml
new file mode 100644
index 0000000..ba717e4
--- /dev/null
+++ b/scripts/irm/lpq_config.yml
@@ -0,0 +1,46 @@
+# Simulation parameters for LPQ Coverage
+
+simulation_parameters:
+ repetitions: 200
+ max_runtime: 19800 # 5.5 hours in seconds
+ random_seed: 42
+ n_jobs: -2
+
+dgp_parameters:
+ n_obs: [5000] # Sample size
+ dim_x: [5] # Number of covariates
+
+# Define reusable learner configurations
+learner_definitions:
+ logit: &logit
+ name: "Logistic"
+
+ lgbmc: &lgbmc
+ name: "LGBM Clas."
+ params:
+ n_estimators: 200 # Fewer trees — faster
+ learning_rate: 0.05 # Balanced speed and stability
+ num_leaves: 15 # Modest complexity for smaller data
+ max_depth: 5 # Limit tree depth to avoid overfitting
+ min_child_samples: 10 # Minimum samples per leaf — conservative
+ subsample: 0.9 # Slightly randomized rows
+ colsample_bytree: 0.9 # Slightly randomized features
+ reg_alpha: 0.0 # No L1 regularization (faster)
+ reg_lambda: 0.1 # Light L2 regularization
+ random_state: 42 # Reproducible
+
+dml_parameters:
+ tau_vec: [[0.3, 0.4, 0.5, 0.6, 0.7]] # Quantiles
+ trimming_threshold: [0.01]
+ learners:
+ - ml_g: *logit
+ ml_m: *logit
+ - ml_g: *lgbmc
+ ml_m: *lgbmc
+ - ml_g: *lgbmc
+ ml_m: *logit
+ - ml_g: *logit
+ ml_m: *lgbmc
+
+confidence_parameters:
+ level: [0.95, 0.90] # Confidence levels
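tau_vec is deliberately a list of lists: the outer level enumerates grid values, while each inner list is one complete quantile vector passed to a single model fit, mirroring the quantiles argument in the deleted lpq_coverage.py. A self-contained sketch of that reading on toy data, reusing only the DoubleMLQTE API the old script already used:

    import numpy as np
    import doubleml as dml
    from sklearn.linear_model import LogisticRegressionCV

    # Toy IV data: instrument Z shifts the binary treatment D.
    np.random.seed(0)
    n = 500
    X = np.random.uniform(0, 1, size=(n, 5))
    Z = np.random.binomial(1, 0.5, size=n)
    D = ((Z + np.random.normal(size=n)) > 0.5).astype(float)
    Y = D + X[:, 0] + np.random.normal(size=n)
    data = dml.DoubleMLData.from_arrays(X, Y, D, Z)

    tau_grid = [[0.3, 0.4, 0.5, 0.6, 0.7]]   # tau_vec from lpq_config.yml
    for tau_vec in tau_grid:                 # one DoubleMLQTE fit per inner list
        dml_qte = dml.DoubleMLQTE(
            obj_dml_data=data,
            ml_g=LogisticRegressionCV(),
            ml_m=LogisticRegressionCV(),
            score="LPQ",
            quantiles=np.asarray(tau_vec),
            trimming_threshold=0.01,
        )
        dml_qte.fit()
        print(dml_qte.coef)                  # one LQTE estimate per quantile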
diff --git a/scripts/irm/lpq_coverage.py b/scripts/irm/lpq_coverage.py
deleted file mode 100644
index ab1152a..0000000
--- a/scripts/irm/lpq_coverage.py
+++ /dev/null
@@ -1,259 +0,0 @@
-import numpy as np
-import pandas as pd
-import multiprocessing
-from datetime import datetime
-import time
-import sys
-
-from sklearn.linear_model import LogisticRegressionCV
-from lightgbm import LGBMClassifier
-
-import doubleml as dml
-
-# set up parallelization
-n_cores = multiprocessing.cpu_count()
-print(f"Number of Cores: {n_cores}")
-cores_used = n_cores-1
-
-# Number of repetitions
-n_rep = 100
-max_runtime = 5.5 * 3600 # 5.5 hours in seconds
-
-# DGP pars
-n_obs = 5000
-tau_vec = np.arange(0.3, 0.75, 0.05)
-p = 5
-
-
-# define loc-scale model
-def f_loc(D, X, X_conf):
- loc = 0.5*D + 2*D*X[:, 4] + 2.0*(X[:, 1] > 0.1) - 1.7*(X[:, 0] * X[:, 2] > 0) - 3*X[:, 3] - 2*X_conf[:, 0]
- return loc
-
-
-def f_scale(D, X, X_conf):
- scale = np.sqrt(0.5*D + 3*D*X[:, 0] + 0.4*X_conf[:, 0] + 2)
- return scale
-
-
-def generate_treatment(Z, X, X_conf):
- eta = np.random.normal(size=len(Z))
- d = ((0.5*Z - 0.3*X[:, 0] + 0.7*X_conf[:, 0] + eta) > 0)*1.0
- return d
-
-
-def dgp(n=200, p=5):
- X = np.random.uniform(0, 1, size=[n, p])
- X_conf = np.random.uniform(-1, 1, size=[n, 1])
- Z = np.random.binomial(1, p=0.5, size=n)
- D = generate_treatment(Z, X, X_conf)
- epsilon = np.random.normal(size=n)
-
- Y = f_loc(D, X, X_conf) + f_scale(D, X, X_conf)*epsilon
-
- return Y, X, D, Z
-
-
-# Estimate true LPQ and LQTE with counterfactuals on large sample
-
-n_true = int(10e+6)
-
-X_true = np.random.uniform(0, 1, size=[n_true, p])
-X_conf_true = np.random.uniform(-1, 1, size=[n_true, 1])
-Z_true = np.random.binomial(1, p=0.5, size=n_true)
-eta_true = np.random.normal(size=n_true)
-D1_true = generate_treatment(np.ones_like(Z_true), X_true, X_conf_true)
-D0_true = generate_treatment(np.zeros_like(Z_true), X_true, X_conf_true)
-epsilon_true = np.random.normal(size=n_true)
-
-compliers = (D1_true == 1) * (D0_true == 0)
-print(f'Compliance probability: {str(compliers.mean())}')
-n_compliers = compliers.sum()
-Y1 = f_loc(np.ones(n_compliers), X_true[compliers, :], X_conf_true[compliers, :]) +\
- f_scale(np.ones(n_compliers), X_true[compliers, :], X_conf_true[compliers, :])*epsilon_true[compliers]
-Y0 = f_loc(np.zeros(n_compliers), X_true[compliers, :], X_conf_true[compliers, :]) +\
- f_scale(np.zeros(n_compliers), X_true[compliers, :], X_conf_true[compliers, :])*epsilon_true[compliers]
-
-Y0_quant = np.quantile(Y0, q=tau_vec)
-Y1_quant = np.quantile(Y1, q=tau_vec)
-print(f'Local Potential Quantile Y(0): {Y0_quant}')
-print(f'Local Potential Quantile Y(1): {Y1_quant}')
-LQTE = Y1_quant - Y0_quant
-print(f'Local Quantile Treatment Effect: {LQTE}')
-
-
-# to get the best possible comparison between different learners (and settings) we first simulate all datasets
-np.random.seed(42)
-datasets = []
-for i in range(n_rep):
- Y, X, D, Z = dgp(n=n_obs, p=p)
- data = dml.DoubleMLData.from_arrays(X, Y, D, Z)
- datasets.append(data)
-
-# set up hyperparameters
-hyperparam_dict = {
- "learner_g": [("Logistic Regression", LogisticRegressionCV()),
- ("LGBM", LGBMClassifier(n_estimators=300, learning_rate=0.05, num_leaves=10, verbose=-1))],
- "learner_m": [("Logistic Regression", LogisticRegressionCV()),
- ("LGBM", LGBMClassifier(n_estimators=300, learning_rate=0.05, num_leaves=10, verbose=-1))],
- "level": [0.95, 0.90]
-}
-
-# set up the results dataframe
-df_results_detailed_qte = pd.DataFrame()
-df_results_detailed_pq0 = pd.DataFrame()
-df_results_detailed_pq1 = pd.DataFrame()
-
-# start simulation
-np.random.seed(42)
-start_time = time.time()
-
-for i_rep in range(n_rep):
- print(f"Repetition: {i_rep}/{n_rep}", end="\r")
-
- # Check the elapsed time
- elapsed_time = time.time() - start_time
- if elapsed_time > max_runtime:
- print("Maximum runtime exceeded. Stopping the simulation.")
- break
-
- # define the DoubleML data object
- obj_dml_data = datasets[i_rep]
-
- for learner_g_idx, (learner_g_name, ml_g) in enumerate(hyperparam_dict["learner_g"]):
- for learner_m_idx, (learner_m_name, ml_m) in enumerate(hyperparam_dict["learner_m"]):
- # Set machine learning methods for g & m
- dml_qte = dml.DoubleMLQTE(
- obj_dml_data=obj_dml_data,
- ml_g=ml_g,
- ml_m=ml_m,
- score='LPQ',
- quantiles=tau_vec
- )
- dml_qte.fit(n_jobs_models=cores_used)
- effects = dml_qte.coef
-
- for level_idx, level in enumerate(hyperparam_dict["level"]):
- confint = dml_qte.confint(level=level)
- coverage = np.mean((confint.iloc[:, 0] < LQTE) & (LQTE < confint.iloc[:, 1]))
- ci_length = np.mean(confint.iloc[:, 1] - confint.iloc[:, 0])
-
- dml_qte.bootstrap(n_rep_boot=2000)
- confint_uniform = dml_qte.confint(level=level, joint=True)
- coverage_uniform = all((confint_uniform.iloc[:, 0] < LQTE) &
- (LQTE < confint_uniform.iloc[:, 1]))
- ci_length_uniform = np.mean(confint_uniform.iloc[:, 1] - confint_uniform.iloc[:, 0])
- df_results_detailed_qte = pd.concat(
- (df_results_detailed_qte,
- pd.DataFrame({
- "Coverage": coverage,
- "CI Length": ci_length,
- "Bias": np.mean(abs(effects - LQTE)),
- "Uniform Coverage": coverage_uniform,
- "Uniform CI Length": ci_length_uniform,
- "Learner g": learner_g_name,
- "Learner m": learner_m_name,
- "level": level,
- "repetition": i_rep}, index=[0])),
- ignore_index=True)
-
- # evaluate each model
- coverage_0 = np.zeros(len(tau_vec))
- coverage_1 = np.zeros(len(tau_vec))
-
- ci_length_0 = np.zeros(len(tau_vec))
- ci_length_1 = np.zeros(len(tau_vec))
-
- bias_0 = np.zeros(len(tau_vec))
- bias_1 = np.zeros(len(tau_vec))
- for tau_idx, tau in enumerate(tau_vec):
- model_0 = dml_qte.modellist_0[tau_idx]
- model_1 = dml_qte.modellist_1[tau_idx]
-
- confint_0 = model_0.confint(level=level)
- confint_1 = model_1.confint(level=level)
-
- coverage_0[tau_idx] = (confint_0.iloc[0, 0] < Y0_quant[tau_idx]) & \
- (Y0_quant[tau_idx] < confint_0.iloc[0, 1])
- coverage_1[tau_idx] = (confint_1.iloc[0, 0] < Y1_quant[tau_idx]) & \
- (Y1_quant[tau_idx] < confint_1.iloc[0, 1])
-
- ci_length_0[tau_idx] = confint_0.iloc[0, 1] - confint_0.iloc[0, 0]
- ci_length_1[tau_idx] = confint_1.iloc[0, 1] - confint_1.iloc[0, 0]
-
- bias_0[tau_idx] = abs(model_0.coef[0] - Y0_quant[tau_idx])
- bias_1[tau_idx] = abs(model_1.coef[0] - Y1_quant[tau_idx])
-
- df_results_detailed_pq0 = pd.concat(
- (df_results_detailed_pq0,
- pd.DataFrame({
- "Coverage": np.mean(coverage_0),
- "CI Length": np.mean(ci_length_0),
- "Bias": np.mean(bias_0),
- "Learner g": learner_g_name,
- "Learner m": learner_m_name,
- "level": level,
- "repetition": i_rep}, index=[0])),
- ignore_index=True)
-
- df_results_detailed_pq1 = pd.concat(
- (df_results_detailed_pq1,
- pd.DataFrame({
- "Coverage": np.mean(coverage_1),
- "CI Length": np.mean(ci_length_1),
- "Bias": np.mean(bias_1),
- "Learner g": learner_g_name,
- "Learner m": learner_m_name,
- "level": level,
- "repetition": i_rep}, index=[0])),
- ignore_index=True)
-
-df_results_qte = df_results_detailed_qte.groupby(
- ["Learner g", "Learner m", "level"]).agg(
- {"Coverage": "mean",
- "CI Length": "mean",
- "Bias": "mean",
- "Uniform Coverage": "mean",
- "Uniform CI Length": "mean",
- "repetition": "count"}
- ).reset_index()
-print(df_results_qte)
-
-df_results_pq0 = df_results_detailed_pq0.groupby(
- ["Learner g", "Learner m", "level"]).agg(
- {"Coverage": "mean",
- "CI Length": "mean",
- "Bias": "mean",
- "repetition": "count"}
- ).reset_index()
-print(df_results_pq0)
-
-df_results_pq1 = df_results_detailed_pq1.groupby(
- ["Learner g", "Learner m", "level"]).agg(
- {"Coverage": "mean",
- "CI Length": "mean",
- "Bias": "mean",
- "repetition": "count"}
- ).reset_index()
-print(df_results_pq1)
-
-end_time = time.time()
-total_runtime = end_time - start_time
-
-# save results
-script_name = "lpq_coverage.py"
-path = "results/irm/lpq_coverage"
-
-metadata = pd.DataFrame({
- 'DoubleML Version': [dml.__version__],
- 'Script': [script_name],
- 'Date': [datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
- 'Total Runtime (seconds)': [total_runtime],
- 'Python Version': [f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"],
-})
-print(metadata)
-
-df_results_qte.to_csv(f"{path}_lqte.csv", index=False)
-df_results_pq0.to_csv(f"{path}_lpq0.csv", index=False)
-df_results_pq1.to_csv(f"{path}_lpq1.csv", index=False)
-metadata.to_csv(f"{path}_metadata.csv", index=False)
diff --git a/scripts/irm/pq.py b/scripts/irm/pq.py
new file mode 100644
index 0000000..61237c0
--- /dev/null
+++ b/scripts/irm/pq.py
@@ -0,0 +1,13 @@
+from montecover.irm import PQCoverageSimulation
+
+# Create and run simulation with config file
+sim = PQCoverageSimulation(
+ config_file="scripts/irm/pq_config.yml",
+ log_level="INFO",
+ log_file="logs/irm/pq_sim.log",
+)
+sim.run_simulation()
+sim.save_results(output_path="results/irm/", file_prefix="pq")
+
+# Save config file for reproducibility
+sim.save_config("results/irm/pq_config.yml")
diff --git a/scripts/irm/pq_config.yml b/scripts/irm/pq_config.yml
new file mode 100644
index 0000000..fcc566f
--- /dev/null
+++ b/scripts/irm/pq_config.yml
@@ -0,0 +1,46 @@
+# Simulation parameters for PQ Coverage
+
+simulation_parameters:
+ repetitions: 200
+ max_runtime: 19800 # 5.5 hours in seconds
+ random_seed: 42
+ n_jobs: -2
+
+dgp_parameters:
+ n_obs: [5000] # Sample size
+ dim_x: [5] # Number of covariates
+
+# Define reusable learner configurations
+learner_definitions:
+ logit: &logit
+ name: "Logistic"
+
+ lgbmc: &lgbmc
+ name: "LGBM Clas."
+ params:
+ n_estimators: 200 # Fewer trees — faster
+ learning_rate: 0.05 # Balanced speed and stability
+ num_leaves: 15 # Modest complexity for smaller data
+ max_depth: 5 # Limit tree depth to avoid overfitting
+ min_child_samples: 10 # Minimum samples per leaf — conservative
+ subsample: 0.9 # Slightly randomized rows
+ colsample_bytree: 0.9 # Slightly randomized features
+ reg_alpha: 0.0 # No L1 regularization (faster)
+ reg_lambda: 0.1 # Light L2 regularization
+ random_state: 42 # Reproducible
+
+dml_parameters:
+ tau_vec: [[0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]] # Quantiles
+ trimming_threshold: [0.01]
+ learners:
+ - ml_g: *logit
+ ml_m: *logit
+ - ml_g: *lgbmc
+ ml_m: *lgbmc
+ - ml_g: *lgbmc
+ ml_m: *logit
+ - ml_g: *logit
+ ml_m: *lgbmc
+
+confidence_parameters:
+ level: [0.95, 0.90] # Confidence levels
diff --git a/scripts/irm/pq_coverage.py b/scripts/irm/pq_coverage.py
deleted file mode 100644
index 2a6a727..0000000
--- a/scripts/irm/pq_coverage.py
+++ /dev/null
@@ -1,238 +0,0 @@
-import numpy as np
-import pandas as pd
-import multiprocessing
-from datetime import datetime
-import time
-import sys
-
-from sklearn.linear_model import LogisticRegressionCV
-from lightgbm import LGBMClassifier
-
-import doubleml as dml
-
-# set up parallelization
-n_cores = multiprocessing.cpu_count()
-print(f"Number of Cores: {n_cores}")
-cores_used = n_cores-1
-
-# Number of repetitions
-n_rep = 100
-max_runtime = 5.5 * 3600 # 5.5 hours in seconds
-
-# DGP pars
-n_obs = 5000
-tau_vec = np.arange(0.2, 0.85, 0.05)
-p = 5
-
-
-# define loc-scale model
-def f_loc(D, X):
- loc = 0.5*D + 2*D*X[:, 4] + 2.0*(X[:, 1] > 0.1) - 1.7*(X[:, 0] * X[:, 2] > 0) - 3*X[:, 3]
- return loc
-
-
-def f_scale(D, X):
- scale = np.sqrt(0.5*D + 0.3*D*X[:, 1] + 2)
- return scale
-
-
-def dgp(n=200, p=5):
- X = np.random.uniform(-1, 1, size=[n, p])
- D = ((X[:, 1] - X[:, 3] + 1.5*(X[:, 0] > 0) + np.random.normal(size=n)) > 0)*1.0
- epsilon = np.random.normal(size=n)
-
- Y = f_loc(D, X) + f_scale(D, X)*epsilon
- return Y, X, D, epsilon
-
-
-# Estimate true PQ and QTE with counterfactuals on large sample
-n_true = int(10e+6)
-
-_, X_true, _, epsilon_true = dgp(n=n_true, p=p)
-D1 = np.ones(n_true)
-D0 = np.zeros(n_true)
-
-Y1 = f_loc(D1, X_true) + f_scale(D1, X_true)*epsilon_true
-Y0 = f_loc(D0, X_true) + f_scale(D0, X_true)*epsilon_true
-
-Y1_quant = np.quantile(Y1, q=tau_vec)
-Y0_quant = np.quantile(Y0, q=tau_vec)
-QTE = Y1_quant - Y0_quant
-
-
-# to get the best possible comparison between different learners (and settings) we first simulate all datasets
-np.random.seed(42)
-datasets = []
-for i in range(n_rep):
- Y, X, D, _ = dgp(n=n_obs,
- p=p)
- data = dml.DoubleMLData.from_arrays(X, Y, D)
- datasets.append(data)
-
-# set up hyperparameters
-hyperparam_dict = {
- "learner_g": [("Logistic Regression", LogisticRegressionCV()),
- ("LGBM", LGBMClassifier(n_estimators=300, learning_rate=0.05, num_leaves=10, verbose=-1))],
- "learner_m": [("Logistic Regression", LogisticRegressionCV()),
- ("LGBM", LGBMClassifier(n_estimators=300, learning_rate=0.05, num_leaves=10, verbose=-1))],
- "level": [0.95, 0.90]
-}
-
-# set up the results dataframe
-df_results_detailed_qte = pd.DataFrame()
-df_results_detailed_pq0 = pd.DataFrame()
-df_results_detailed_pq1 = pd.DataFrame()
-
-# start simulation
-np.random.seed(42)
-start_time = time.time()
-
-for i_rep in range(n_rep):
- print(f"Repetition: {i_rep}/{n_rep}", end="\r")
-
- # Check the elapsed time
- elapsed_time = time.time() - start_time
- if elapsed_time > max_runtime:
- print("Maximum runtime exceeded. Stopping the simulation.")
- break
-
- # define the DoubleML data object
- obj_dml_data = datasets[i_rep]
-
- for learner_g_idx, (learner_g_name, ml_g) in enumerate(hyperparam_dict["learner_g"]):
- for learner_m_idx, (learner_m_name, ml_m) in enumerate(hyperparam_dict["learner_m"]):
- # Set machine learning methods for g & m
- dml_qte = dml.DoubleMLQTE(
- obj_dml_data=obj_dml_data,
- ml_g=ml_g,
- ml_m=ml_m,
- score="PQ",
- quantiles=tau_vec
- )
- dml_qte.fit(n_jobs_models=cores_used)
- effects = dml_qte.coef
-
- for level_idx, level in enumerate(hyperparam_dict["level"]):
- confint = dml_qte.confint(level=level)
- coverage = np.mean((confint.iloc[:, 0] < QTE) & (QTE < confint.iloc[:, 1]))
- ci_length = np.mean(confint.iloc[:, 1] - confint.iloc[:, 0])
-
- dml_qte.bootstrap(n_rep_boot=2000)
- confint_uniform = dml_qte.confint(level=level, joint=True)
- coverage_uniform = all((confint_uniform.iloc[:, 0] < QTE) &
- (QTE < confint_uniform.iloc[:, 1]))
- ci_length_uniform = np.mean(confint_uniform.iloc[:, 1] - confint_uniform.iloc[:, 0])
- df_results_detailed_qte = pd.concat(
- (df_results_detailed_qte,
- pd.DataFrame({
- "Coverage": coverage,
- "CI Length": ci_length,
- "Bias": np.mean(abs(effects - QTE)),
- "Uniform Coverage": coverage_uniform,
- "Uniform CI Length": ci_length_uniform,
- "Learner g": learner_g_name,
- "Learner m": learner_m_name,
- "level": level,
- "repetition": i_rep}, index=[0])),
- ignore_index=True)
-
- # evaluate each model
- coverage_0 = np.zeros(len(tau_vec))
- coverage_1 = np.zeros(len(tau_vec))
-
- ci_length_0 = np.zeros(len(tau_vec))
- ci_length_1 = np.zeros(len(tau_vec))
-
- bias_0 = np.zeros(len(tau_vec))
- bias_1 = np.zeros(len(tau_vec))
- for tau_idx, tau in enumerate(tau_vec):
- model_0 = dml_qte.modellist_0[tau_idx]
- model_1 = dml_qte.modellist_1[tau_idx]
-
- confint_0 = model_0.confint(level=level)
- confint_1 = model_1.confint(level=level)
-
- coverage_0[tau_idx] = (confint_0.iloc[0, 0] < Y0_quant[tau_idx]) & \
- (Y0_quant[tau_idx] < confint_0.iloc[0, 1])
- coverage_1[tau_idx] = (confint_1.iloc[0, 0] < Y1_quant[tau_idx]) & \
- (Y1_quant[tau_idx] < confint_1.iloc[0, 1])
-
- ci_length_0[tau_idx] = confint_0.iloc[0, 1] - confint_0.iloc[0, 0]
- ci_length_1[tau_idx] = confint_1.iloc[0, 1] - confint_1.iloc[0, 0]
-
- bias_0[tau_idx] = abs(model_0.coef[0] - Y0_quant[tau_idx])
- bias_1[tau_idx] = abs(model_1.coef[0] - Y1_quant[tau_idx])
-
- df_results_detailed_pq0 = pd.concat(
- (df_results_detailed_pq0,
- pd.DataFrame({
- "Coverage": np.mean(coverage_0),
- "CI Length": np.mean(ci_length_0),
- "Bias": np.mean(bias_0),
- "Learner g": learner_g_name,
- "Learner m": learner_m_name,
- "level": level,
- "repetition": i_rep}, index=[0])),
- ignore_index=True)
-
- df_results_detailed_pq1 = pd.concat(
- (df_results_detailed_pq1,
- pd.DataFrame({
- "Coverage": np.mean(coverage_1),
- "CI Length": np.mean(ci_length_1),
- "Bias": np.mean(bias_1),
- "Learner g": learner_g_name,
- "Learner m": learner_m_name,
- "level": level,
- "repetition": i_rep}, index=[0])),
- ignore_index=True)
-
-df_results_qte = df_results_detailed_qte.groupby(
- ["Learner g", "Learner m", "level"]).agg(
- {"Coverage": "mean",
- "CI Length": "mean",
- "Bias": "mean",
- "Uniform Coverage": "mean",
- "Uniform CI Length": "mean",
- "repetition": "count"}
- ).reset_index()
-print(df_results_qte)
-
-df_results_pq0 = df_results_detailed_pq0.groupby(
- ["Learner g", "Learner m", "level"]).agg(
- {"Coverage": "mean",
- "CI Length": "mean",
- "Bias": "mean",
- "repetition": "count"}
- ).reset_index()
-print(df_results_pq0)
-
-df_results_pq1 = df_results_detailed_pq1.groupby(
- ["Learner g", "Learner m", "level"]).agg(
- {"Coverage": "mean",
- "CI Length": "mean",
- "Bias": "mean",
- "repetition": "count"}
- ).reset_index()
-print(df_results_pq1)
-
-end_time = time.time()
-total_runtime = end_time - start_time
-
-# save results
-script_name = "pq_coverage.py"
-path = "results/irm/pq_coverage"
-
-metadata = pd.DataFrame({
- 'DoubleML Version': [dml.__version__],
- 'Script': [script_name],
- 'Date': [datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
- 'Total Runtime (seconds)': [total_runtime],
- 'Python Version': [f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"],
-})
-print(metadata)
-
-df_results_qte.to_csv(f"{path}_qte.csv", index=False)
-df_results_pq0.to_csv(f"{path}_pq0.csv", index=False)
-df_results_pq1.to_csv(f"{path}_pq1.csv", index=False)
-metadata.to_csv(f"{path}_metadata.csv", index=False)
diff --git a/scripts/irm/ssm_mar_ate_coverage.py b/scripts/irm/ssm_mar_ate_coverage.py
deleted file mode 100644
index 8aa0e56..0000000
--- a/scripts/irm/ssm_mar_ate_coverage.py
+++ /dev/null
@@ -1,124 +0,0 @@
-import numpy as np
-import pandas as pd
-from datetime import datetime
-import time
-import sys
-
-from lightgbm import LGBMRegressor, LGBMClassifier
-from sklearn.linear_model import LassoCV, LogisticRegressionCV
-
-import doubleml as dml
-from doubleml.datasets import make_ssm_data
-
-
-# Number of repetitions
-n_rep = 1000
-max_runtime = 5.5 * 3600 # 5.5 hours in seconds
-
-# DGP pars
-theta = 1.0
-n_obs = 500
-dim_x = 20
-
-# to get the best possible comparison between different learners (and settings) we first simulate all datasets
-np.random.seed(42)
-
-datasets = []
-for i in range(n_rep):
- data = make_ssm_data(theta=theta, n_obs=n_obs, dim_x=dim_x, mar=True, return_type='DataFrame')
- datasets.append(data)
-
-# set up hyperparameters
-hyperparam_dict = {
- "score": ["missing-at-random"],
- "learner_g": [("Lasso", LassoCV()),
- ("LGBM",
- LGBMRegressor(verbose=-1))],
- "learner_m": [("Logistic", LogisticRegressionCV()),
- ("LGBM",
- LGBMClassifier(verbose=-1))],
- "learner_pi": [("Logistic", LogisticRegressionCV()),
- ("LGBM",
- LGBMClassifier(verbose=-1))],
- "level": [0.95, 0.90]
-}
-
-# set up the results dataframe
-df_results_detailed = pd.DataFrame()
-
-# start simulation
-np.random.seed(42)
-start_time = time.time()
-
-for i_rep in range(n_rep):
- print(f"Repetition: {i_rep}/{n_rep}", end="\r")
-
- # Check the elapsed time
- elapsed_time = time.time() - start_time
- if elapsed_time > max_runtime:
- print("Maximum runtime exceeded. Stopping the simulation.")
- break
-
- # define the DoubleML data object
- obj_dml_data = dml.DoubleMLData(datasets[i_rep], 'y', 'd', s_col='s')
-
- for score_idx, score in enumerate(hyperparam_dict["score"]):
- for learner_g_idx, (learner_g_name, ml_g) in enumerate(hyperparam_dict["learner_g"]):
- for learner_m_idx, (learner_m_name, ml_m) in enumerate(hyperparam_dict["learner_m"]):
- for learner_pi_idx, (learner_pi_name, ml_pi) in enumerate(hyperparam_dict["learner_pi"]):
-
- dml_ssm = dml.DoubleMLSSM(
- obj_dml_data=obj_dml_data,
- ml_g=ml_g,
- ml_m=ml_m,
- ml_pi=ml_pi,
- score=score,
- )
- dml_ssm.fit(n_jobs_cv=5)
-
- for level_idx, level in enumerate(hyperparam_dict["level"]):
- confint = dml_ssm.confint(level=level)
- coverage = (confint.iloc[0, 0] < theta) & (theta < confint.iloc[0, 1])
- ci_length = confint.iloc[0, 1] - confint.iloc[0, 0]
-
- df_results_detailed = pd.concat(
- (df_results_detailed,
- pd.DataFrame({
- "Coverage": coverage.astype(int),
- "CI Length": confint.iloc[0, 1] - confint.iloc[0, 0],
- "Bias": abs(dml_ssm.coef[0] - theta),
- "score": score,
- "Learner g": learner_g_name,
- "Learner m": learner_m_name,
- "Learner pi": learner_pi_name,
- "level": level,
- "repetition": i_rep}, index=[0])),
- ignore_index=True)
-
-df_results = df_results_detailed.groupby(
- ["Learner g", "Learner m", "Learner pi", "score", "level"]).agg(
- {"Coverage": "mean",
- "CI Length": "mean",
- "Bias": "mean",
- "repetition": "count"}
- ).reset_index()
-print(df_results)
-
-end_time = time.time()
-total_runtime = end_time - start_time
-
-# save results
-script_name = "ssm_mar_ate_coverage.py"
-path = "results/irm/ssm_mar_ate_coverage"
-
-metadata = pd.DataFrame({
- 'DoubleML Version': [dml.__version__],
- 'Script': [script_name],
- 'Date': [datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
- 'Total Runtime (seconds)': [total_runtime],
- 'Python Version': [f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"],
-})
-print(metadata)
-
-df_results.to_csv(f"{path}.csv", index=False)
-metadata.to_csv(f"{path}_metadata.csv", index=False)
diff --git a/scripts/irm/ssm_nonignorable_ate_coverage.py b/scripts/irm/ssm_nonignorable_ate_coverage.py
deleted file mode 100644
index ebbacfe..0000000
--- a/scripts/irm/ssm_nonignorable_ate_coverage.py
+++ /dev/null
@@ -1,124 +0,0 @@
-import numpy as np
-import pandas as pd
-from datetime import datetime
-import time
-import sys
-
-from lightgbm import LGBMRegressor, LGBMClassifier
-from sklearn.linear_model import LassoCV, LogisticRegressionCV
-
-import doubleml as dml
-from doubleml.datasets import make_ssm_data
-
-
-# Number of repetitions
-n_rep = 1000
-max_runtime = 5.5 * 3600 # 5.5 hours in seconds
-
-# DGP pars
-theta = 1.0
-n_obs = 500
-dim_x = 20
-
-# to get the best possible comparison between different learners (and settings) we first simulate all datasets
-np.random.seed(42)
-
-datasets = []
-for i in range(n_rep):
- data = make_ssm_data(theta=theta, n_obs=n_obs, dim_x=dim_x, mar=False, return_type='DataFrame')
- datasets.append(data)
-
-# set up hyperparameters
-hyperparam_dict = {
- "score": ["nonignorable"],
- "learner_g": [("Lasso", LassoCV()),
- ("LGBM",
- LGBMRegressor(verbose=-1))],
- "learner_m": [("Logistic", LogisticRegressionCV()),
- ("LGBM",
- LGBMClassifier(verbose=-1))],
- "learner_pi": [("Logistic", LogisticRegressionCV()),
- ("LGBM",
- LGBMClassifier(verbose=-1))],
- "level": [0.95, 0.90]
-}
-
-# set up the results dataframe
-df_results_detailed = pd.DataFrame()
-
-# start simulation
-np.random.seed(42)
-start_time = time.time()
-
-for i_rep in range(n_rep):
- print(f"Repetition: {i_rep}/{n_rep}", end="\r")
-
- # Check the elapsed time
- elapsed_time = time.time() - start_time
- if elapsed_time > max_runtime:
- print("Maximum runtime exceeded. Stopping the simulation.")
- break
-
- # define the DoubleML data object
- obj_dml_data = dml.DoubleMLData(datasets[i_rep], 'y', 'd', z_cols='z', s_col='s')
-
- for score_idx, score in enumerate(hyperparam_dict["score"]):
- for learner_g_idx, (learner_g_name, ml_g) in enumerate(hyperparam_dict["learner_g"]):
- for learner_m_idx, (learner_m_name, ml_m) in enumerate(hyperparam_dict["learner_m"]):
- for learner_pi_idx, (learner_pi_name, ml_pi) in enumerate(hyperparam_dict["learner_pi"]):
-
- dml_ssm = dml.DoubleMLSSM(
- obj_dml_data=obj_dml_data,
- ml_g=ml_g,
- ml_m=ml_m,
- ml_pi=ml_pi,
- score=score,
- )
- dml_ssm.fit(n_jobs_cv=5)
-
- for level_idx, level in enumerate(hyperparam_dict["level"]):
- confint = dml_ssm.confint(level=level)
- coverage = (confint.iloc[0, 0] < theta) & (theta < confint.iloc[0, 1])
- ci_length = confint.iloc[0, 1] - confint.iloc[0, 0]
-
- df_results_detailed = pd.concat(
- (df_results_detailed,
- pd.DataFrame({
- "Coverage": coverage.astype(int),
- "CI Length": confint.iloc[0, 1] - confint.iloc[0, 0],
- "Bias": abs(dml_ssm.coef[0] - theta),
- "score": score,
- "Learner g": learner_g_name,
- "Learner m": learner_m_name,
- "Learner pi": learner_pi_name,
- "level": level,
- "repetition": i_rep}, index=[0])),
- ignore_index=True)
-
-df_results = df_results_detailed.groupby(
- ["Learner g", "Learner m", "Learner pi", "score", "level"]).agg(
- {"Coverage": "mean",
- "CI Length": "mean",
- "Bias": "mean",
- "repetition": "count"}
- ).reset_index()
-print(df_results)
-
-end_time = time.time()
-total_runtime = end_time - start_time
-
-# save results
-script_name = "ssm_nonignorable_ate_coverage.py"
-path = "results/irm/ssm_nonignorable_ate_coverage"
-
-metadata = pd.DataFrame({
- 'DoubleML Version': [dml.__version__],
- 'Script': [script_name],
- 'Date': [datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
- 'Total Runtime (seconds)': [total_runtime],
- 'Python Version': [f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"],
-})
-print(metadata)
-
-df_results.to_csv(f"{path}.csv", index=False)
-metadata.to_csv(f"{path}_metadata.csv", index=False)
diff --git a/scripts/plm/pliv_late.py b/scripts/plm/pliv_late.py
new file mode 100644
index 0000000..f8b957d
--- /dev/null
+++ b/scripts/plm/pliv_late.py
@@ -0,0 +1,13 @@
+from montecover.plm import PLIVLATECoverageSimulation
+
+# Create and run simulation with config file
+sim = PLIVLATECoverageSimulation(
+ config_file="scripts/plm/pliv_late_config.yml",
+ log_level="INFO",
+ log_file="logs/plm/pliv_late_sim.log",
+)
+sim.run_simulation()
+sim.save_results(output_path="results/plm/", file_prefix="pliv_late")
+
+# Save config file for reproducibility
+sim.save_config("results/plm/pliv_late_config.yml")
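+
+# The simulation_parameters block of the config (repetitions, max_runtime,
+# random_seed, n_jobs) is assumed to be consumed by run_simulation(), which
+# handles seeding and the runtime cap internally.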
diff --git a/scripts/plm/pliv_late_config.yml b/scripts/plm/pliv_late_config.yml
new file mode 100644
index 0000000..975025f
--- /dev/null
+++ b/scripts/plm/pliv_late_config.yml
@@ -0,0 +1,59 @@
+# Simulation parameters for PLIV LATE Coverage
+
+simulation_parameters:
+ repetitions: 1000
+ max_runtime: 19800 # 5.5 hours in seconds
+ random_seed: 42
+ n_jobs: -2
+
+dgp_parameters:
+ theta: [0.5] # Treatment effect
+ n_obs: [500] # Sample size
+ dim_x: [20] # Number of covariates
+ dim_z: [1] # Number of instruments
+
+# Define reusable learner configurations
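+# The entries below define YAML anchors (&name) that are referenced as aliases
+# (*name) in dml_parameters.learners, so each learner is declared once and
+# reused. The "name" strings (e.g. "LassoCV", "RF Regr.") are assumed to be
+# resolved by montecover to the matching scikit-learn/LightGBM estimators,
+# with "params" forwarded to the estimator constructor.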
+learner_definitions:
+ lasso: &lasso
+ name: "LassoCV"
+
+ rf: &rf
+ name: "RF Regr."
+ params:
+ n_estimators: 200
+ max_features: 20
+ max_depth: 5
+ min_samples_leaf: 2
+
+dml_parameters:
+  # ML methods for ml_g, ml_m and ml_r
+ learners:
+ - ml_g: *lasso
+ ml_m: *lasso
+ ml_r: *lasso
+ - ml_g: *rf
+ ml_m: *rf
+ ml_r: *rf
+ - ml_g: *lasso
+ ml_m: *rf
+ ml_r: *rf
+ - ml_g: *rf
+ ml_m: *lasso
+ ml_r: *rf
+ - ml_g: *rf
+ ml_m: *rf
+ ml_r: *lasso
+ - ml_g: *lasso
+ ml_m: *lasso
+ ml_r: *rf
+ - ml_g: *rf
+ ml_m: *lasso
+ ml_r: *lasso
+ - ml_g: *lasso
+ ml_m: *rf
+ ml_r: *lasso
+
+ score: ["partialling out", "IV-type"]
+
+confidence_parameters:
+ level: [0.95, 0.90] # Confidence levels
diff --git a/scripts/plm/pliv_late_coverage.py b/scripts/plm/pliv_late_coverage.py
deleted file mode 100644
index 6cc4329..0000000
--- a/scripts/plm/pliv_late_coverage.py
+++ /dev/null
@@ -1,136 +0,0 @@
-import numpy as np
-import pandas as pd
-from datetime import datetime
-import time
-import sys
-
-from sklearn.ensemble import RandomForestRegressor
-from sklearn.linear_model import LassoCV
-
-import doubleml as dml
-from doubleml.datasets import make_pliv_CHS2015
-
-
-# Number of repetitions
-n_rep = 1000
-max_runtime = 5.5 * 3600 # 5.5 hours in seconds
-
-# DGP pars
-theta = 0.5
-n_obs = 500
-dim_x = 20
-dim_z = 1
-
-# to get the best possible comparison between different learners (and settings) we first simulate all datasets
-np.random.seed(42)
-
-datasets = []
-for i in range(n_rep):
- data = make_pliv_CHS2015(alpha=theta, n_obs=n_obs, dim_x=dim_x, dim_z=dim_z, return_type='DataFrame')
- datasets.append(data)
-
-# set up hyperparameters
-hyperparam_dict = {
- "score": ["partialling out", "IV-type"],
- "learner_g": [("Lasso", LassoCV()),
- ("Random Forest",
- RandomForestRegressor(n_estimators=100, max_features=20, max_depth=5, min_samples_leaf=2))],
- "learner_m": [("Lasso", LassoCV()),
- ("Random Forest",
- RandomForestRegressor(n_estimators=100, max_features=20, max_depth=5, min_samples_leaf=2))],
- "learner_r": [("Lasso", LassoCV()),
- ("Random Forest",
- RandomForestRegressor(n_estimators=100, max_features=20, max_depth=5, min_samples_leaf=2))],
- "level": [0.95, 0.90]
-}
-
-# set up the results dataframe
-df_results_detailed = pd.DataFrame()
-
-# start simulation
-np.random.seed(42)
-start_time = time.time()
-
-for i_rep in range(n_rep):
- print(f"Repetition: {i_rep}/{n_rep}", end="\r")
-
- # Check the elapsed time
- elapsed_time = time.time() - start_time
- if elapsed_time > max_runtime:
- print("Maximum runtime exceeded. Stopping the simulation.")
- break
-
- # define the DoubleML data object
- obj_dml_data = dml.DoubleMLData(datasets[i_rep], 'y', 'd', z_cols='Z1')
-
- for score_idx, score in enumerate(hyperparam_dict["score"]):
- for learner_g_idx, (learner_g_name, ml_g) in enumerate(hyperparam_dict["learner_g"]):
- for learner_m_idx, (learner_m_name, ml_m) in enumerate(hyperparam_dict["learner_m"]):
- for learner_r_idx, (learner_r_name, ml_r) in enumerate(hyperparam_dict["learner_r"]):
- if score == "IV-type":
- # Set machine learning methods for g & m
- dml_pliv = dml.DoubleMLPLIV(
- obj_dml_data=obj_dml_data,
- ml_l=ml_g,
- ml_m=ml_m,
- ml_g=ml_g,
- ml_r=ml_r,
- score="IV-type",
- )
- else:
- # Set machine learning methods for g & m
- dml_pliv = dml.DoubleMLPLIV(
- obj_dml_data=obj_dml_data,
- ml_l=ml_g,
- ml_m=ml_m,
- ml_r=ml_r,
- score=score,
- )
- dml_pliv.fit(n_jobs_cv=5)
-
- for level_idx, level in enumerate(hyperparam_dict["level"]):
- confint = dml_pliv.confint(level=level)
- coverage = (confint.iloc[0, 0] < theta) & (theta < confint.iloc[0, 1])
- ci_length = confint.iloc[0, 1] - confint.iloc[0, 0]
-
- df_results_detailed = pd.concat(
- (df_results_detailed,
- pd.DataFrame({
- "Coverage": coverage.astype(int),
- "CI Length": confint.iloc[0, 1] - confint.iloc[0, 0],
- "Bias": abs(dml_pliv.coef[0] - theta),
- "score": score,
- "Learner g": learner_g_name,
- "Learner m": learner_m_name,
- "Learner r": learner_r_name,
- "level": level,
- "repetition": i_rep}, index=[0])),
- ignore_index=True)
-
-df_results = df_results_detailed.groupby(
- ["Learner g", "Learner m", "Learner r", "score", "level"]).agg(
- {"Coverage": "mean",
- "CI Length": "mean",
- "Bias": "mean",
- "repetition": "count"}
- ).reset_index()
-print(df_results)
-
-end_time = time.time()
-total_runtime = end_time - start_time
-
-# save results
-script_name = "pliv_late_coverage.py"
-path = "results/plm/pliv_late_coverage"
-
-metadata = pd.DataFrame({
- 'DoubleML Version': [dml.__version__],
- 'Script': [script_name],
- 'Date': [datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
- 'Total Runtime (seconds)': [total_runtime],
- 'Python Version': [f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"],
-})
-print(metadata)
-
-df_results.to_csv(f"{path}.csv", index=False)
-metadata.to_csv(f"{path}_metadata.csv", index=False)
diff --git a/scripts/plm/plr_ate.py b/scripts/plm/plr_ate.py
index 09e2531..660779d 100644
--- a/scripts/plm/plr_ate.py
+++ b/scripts/plm/plr_ate.py
@@ -1,11 +1,10 @@
-
from montecover.plm import PLRATECoverageSimulation
# Create and run simulation with config file
sim = PLRATECoverageSimulation(
config_file="scripts/plm/plr_ate_config.yml",
log_level="INFO",
- log_file="logs/plm/plr_ate_sim.log"
+ log_file="logs/plm/plr_ate_sim.log",
)
sim.run_simulation()
sim.save_results(output_path="results/plm/", file_prefix="plr_ate")
diff --git a/scripts/plm/plr_ate_config.yml b/scripts/plm/plr_ate_config.yml
index e2067bc..9d4a1ef 100644
--- a/scripts/plm/plr_ate_config.yml
+++ b/scripts/plm/plr_ate_config.yml
@@ -11,23 +11,43 @@ dgp_parameters:
n_obs: [500] # Sample size
dim_x: [20] # Number of covariates
+# Define reusable learner configurations
+learner_definitions:
+ lasso: &lasso
+ name: "LassoCV"
+
+ rf: &rf
+ name: "RF Regr."
+ params:
+ n_estimators: 200
+ max_features: 10
+ max_depth: 5
+ min_samples_leaf: 20
+
+ lgbm: &lgbm
+ name: "LGBM Regr."
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+
dml_parameters:
- # ML methods for ml_g and ml_m
learners:
- - ml_g: ["Lasso"]
- ml_m: ["Lasso"]
- - ml_g: ["Random Forest"]
- ml_m: ["Random Forest"]
- - ml_g: ["Lasso"]
- ml_m: ["Random Forest"]
- - ml_g: ["Random Forest"]
- ml_m: ["Lasso"]
- - ml_g: ["LGBM"]
- ml_m: ["LGBM"]
- - ml_g: ["LGBM"]
- ml_m: ["Lasso"]
+ - ml_g: *lasso
+ ml_m: *lasso
+ - ml_g: *rf
+ ml_m: *rf
+ - ml_g: *lasso
+ ml_m: *rf
+ - ml_g: *rf
+ ml_m: *lasso
+ - ml_g: *lgbm
+ ml_m: *lgbm
+ - ml_g: *lgbm
+ ml_m: *lasso
+ - ml_g: *lasso
+ ml_m: *lgbm
score: ["partialling out", "IV-type"]
confidence_parameters:
- level: [0.95, 0.90] # Confidence levels
\ No newline at end of file
+ level: [0.95, 0.90] # Confidence levels
diff --git a/scripts/plm/plr_ate_coverage.py b/scripts/plm/plr_ate_coverage.py
deleted file mode 100644
index ab715d7..0000000
--- a/scripts/plm/plr_ate_coverage.py
+++ /dev/null
@@ -1,128 +0,0 @@
-import numpy as np
-import pandas as pd
-from datetime import datetime
-import time
-import sys
-
-from sklearn.ensemble import RandomForestRegressor
-from sklearn.linear_model import LassoCV
-
-import doubleml as dml
-from doubleml.datasets import make_plr_CCDDHNR2018
-
-
-# Number of repetitions
-n_rep = 1000
-max_runtime = 5.5 * 3600 # 5.5 hours in seconds
-
-# DGP pars
-theta = 0.5
-n_obs = 500
-dim_x = 20
-
-# to get the best possible comparison between different learners (and settings) we first simulate all datasets
-np.random.seed(42)
-
-datasets = []
-for i in range(n_rep):
- data = make_plr_CCDDHNR2018(alpha=theta, n_obs=n_obs, dim_x=dim_x, return_type='DataFrame')
- datasets.append(data)
-
-# set up hyperparameters
-hyperparam_dict = {
- "score": ["partialling out", "IV-type"],
- "learner_g": [("Lasso", LassoCV()),
- ("Random Forest",
- RandomForestRegressor(n_estimators=100, max_features=20, max_depth=5, min_samples_leaf=2))],
- "learner_m": [("Lasso", LassoCV()),
- ("Random Forest",
- RandomForestRegressor(n_estimators=100, max_features=20, max_depth=5, min_samples_leaf=2))],
- "level": [0.95, 0.90]
-}
-
-# set up the results dataframe
-df_results_detailed = pd.DataFrame()
-
-# start simulation
-np.random.seed(42)
-start_time = time.time()
-
-for i_rep in range(n_rep):
- print(f"Repetition: {i_rep}/{n_rep}", end="\r")
-
- # Check the elapsed time
- elapsed_time = time.time() - start_time
- if elapsed_time > max_runtime:
- print("Maximum runtime exceeded. Stopping the simulation.")
- break
-
- # define the DoubleML data object
- obj_dml_data = dml.DoubleMLData(datasets[i_rep], 'y', 'd')
-
- for score_idx, score in enumerate(hyperparam_dict["score"]):
- for learner_g_idx, (learner_g_name, ml_g) in enumerate(hyperparam_dict["learner_g"]):
- for learner_m_idx, (learner_m_name, ml_m) in enumerate(hyperparam_dict["learner_m"]):
- if score == "IV-type":
- # Set machine learning methods for g & m
- dml_plr = dml.DoubleMLPLR(
- obj_dml_data=obj_dml_data,
- ml_l=ml_g,
- ml_m=ml_m,
- ml_g=ml_g,
- score="IV-type",
- )
- else:
- # Set machine learning methods for g & m
- dml_plr = dml.DoubleMLPLR(
- obj_dml_data=obj_dml_data,
- ml_l=ml_g,
- ml_m=ml_m,
- score=score,
- )
- dml_plr.fit(n_jobs_cv=5)
-
- for level_idx, level in enumerate(hyperparam_dict["level"]):
- confint = dml_plr.confint(level=level)
- coverage = (confint.iloc[0, 0] < theta) & (theta < confint.iloc[0, 1])
- ci_length = confint.iloc[0, 1] - confint.iloc[0, 0]
-
- df_results_detailed = pd.concat(
- (df_results_detailed,
- pd.DataFrame({
- "Coverage": coverage.astype(int),
- "CI Length": confint.iloc[0, 1] - confint.iloc[0, 0],
- "Bias": abs(dml_plr.coef[0] - theta),
- "score": score,
- "Learner g": learner_g_name,
- "Learner m": learner_m_name,
- "level": level,
- "repetition": i_rep}, index=[0])),
- ignore_index=True)
-
-df_results = df_results_detailed.groupby(
- ["Learner g", "Learner m", "score", "level"]).agg(
- {"Coverage": "mean",
- "CI Length": "mean",
- "Bias": "mean",
- "repetition": "count"}
- ).reset_index()
-print(df_results)
-
-end_time = time.time()
-total_runtime = end_time - start_time
-
-# save results
-script_name = "plr_ate_coverage.py"
-path = "results/plm/plr_ate_coverage"
-
-metadata = pd.DataFrame({
- 'DoubleML Version': [dml.__version__],
- 'Script': [script_name],
- 'Date': [datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
- 'Total Runtime (seconds)': [total_runtime],
- 'Python Version': [f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"],
-})
-print(metadata)
-
-df_results.to_csv(f"{path}.csv", index=False)
-metadata.to_csv(f"{path}_metadata.csv", index=False)
diff --git a/scripts/plm/plr_ate_sensitivity.py b/scripts/plm/plr_ate_sensitivity.py
index 34e73dc..2f53b98 100644
--- a/scripts/plm/plr_ate_sensitivity.py
+++ b/scripts/plm/plr_ate_sensitivity.py
@@ -1,173 +1,13 @@
-import numpy as np
-import pandas as pd
-from datetime import datetime
-import time
-import sys
-
-from sklearn.ensemble import RandomForestRegressor
-from lightgbm import LGBMRegressor
-
-import doubleml as dml
-from doubleml.datasets import make_confounded_plr_data
-
-
-# Number of repetitions
-n_rep = 500
-max_runtime = 5.5 * 3600 # 5.5 hours in seconds
-
-# DGP pars
-n_obs = 1000
-cf_y = 0.1
-cf_d = 0.1
-theta = 5.0
-
-# to get the best possible comparison between different learners (and settings) we first simulate all datasets
-np.random.seed(42)
-
-# test inputs
-dgp_dict = make_confounded_plr_data(n_obs=int(1e+6), cf_y=cf_y, cf_d=cf_d)
-oracle_dict = dgp_dict['oracle_values']
-
-cf_y_test = np.mean(np.square(oracle_dict['g_long'] - oracle_dict['g_short'])) / \
- np.mean(np.square(dgp_dict['y'] - oracle_dict['g_short']))
-print(f'Input cf_y:{cf_y} \nCalculated cf_y: {round(cf_y_test, 5)}')
-
-rr_long = (dgp_dict['d'] - oracle_dict['m_long']) / np.mean(np.square(dgp_dict['d'] - oracle_dict['m_long']))
-rr_short = (dgp_dict['d'] - oracle_dict['m_short']) / np.mean(np.square(dgp_dict['d'] - oracle_dict['m_short']))
-C2_D = (np.mean(np.square(rr_long)) - np.mean(np.square(rr_short))) / np.mean(np.square(rr_short))
-cf_d_test = C2_D / (1 + C2_D)
-print(f'Input cf_d:{cf_d}\nCalculated cf_d: {round(cf_d_test, 5)}')
-
-# compute the value for rho
-rho = np.corrcoef((oracle_dict['g_long'] - oracle_dict['g_short']), (rr_long - rr_short))[0, 1]
-print(f'Correlation rho: {round(rho, 5)}')
-
-datasets = []
-for i in range(n_rep):
- data = make_confounded_plr_data(n_obs=n_obs, cf_y=cf_y, cf_d=cf_d, theta=theta)
- datasets.append(data)
-
-# set up hyperparameters
-hyperparam_dict = {
- "score": ["partialling out", "IV-type"],
- "learner_g": [("LGBM", LGBMRegressor(n_estimators=500, learning_rate=0.05, min_child_samples=5, verbose=-1)),
- ("Random Forest",
- RandomForestRegressor(n_estimators=100, max_features=20, max_depth=5, min_samples_leaf=2))],
- "learner_m": [("LGBM", LGBMRegressor(n_estimators=500, learning_rate=0.05, min_child_samples=2, verbose=-1)),
- ("Random Forest",
- RandomForestRegressor(n_estimators=100, max_features=20, max_depth=5, min_samples_leaf=2))],
- "level": [0.95, 0.90]
-}
-
-# set up the results dataframe
-df_results_detailed = pd.DataFrame()
-
-# start simulation
-np.random.seed(42)
-start_time = time.time()
-
-for i_rep in range(n_rep):
- print(f"Repetition: {i_rep + 1}/{n_rep}", end="\r")
-
- # Check the elapsed time
- elapsed_time = time.time() - start_time
- if elapsed_time > max_runtime:
- print("Maximum runtime exceeded. Stopping the simulation.")
- break
-
- # define the DoubleML data object
- dgp_dict = datasets[i_rep]
- x_cols = [f'X{i + 1}' for i in np.arange(dgp_dict['x'].shape[1])]
- df = pd.DataFrame(np.column_stack((dgp_dict['x'], dgp_dict['y'], dgp_dict['d'])), columns=x_cols + ['y', 'd'])
- obj_dml_data = dml.DoubleMLData(df, 'y', 'd')
-
- for score_idx, score in enumerate(hyperparam_dict["score"]):
- for learner_g_idx, (learner_g_name, ml_g) in enumerate(hyperparam_dict["learner_g"]):
- for learner_m_idx, (learner_m_name, ml_m) in enumerate(hyperparam_dict["learner_m"]):
- if score == "IV-type":
- # Set machine learning methods for g & m
- dml_plr = dml.DoubleMLPLR(
- obj_dml_data=obj_dml_data,
- ml_l=ml_g,
- ml_m=ml_m,
- ml_g=ml_g,
- score="IV-type",
- )
- else:
- # Set machine learning methods for g & m
- dml_plr = dml.DoubleMLPLR(
- obj_dml_data=obj_dml_data,
- ml_l=ml_g,
- ml_m=ml_m,
- score=score,
- )
- dml_plr.fit(n_jobs_cv=5)
-
- for level_idx, level in enumerate(hyperparam_dict["level"]):
-
- estimate = dml_plr.coef[0]
- confint = dml_plr.confint(level=level)
- coverage = (confint.iloc[0, 0] < theta) & (theta < confint.iloc[0, 1])
- ci_length = confint.iloc[0, 1] - confint.iloc[0, 0]
-
- # test sensitivity parameters
- dml_plr.sensitivity_analysis(cf_y=cf_y, cf_d=cf_d, rho=rho, level=level, null_hypothesis=theta)
- cover_lower = theta >= dml_plr.sensitivity_params['ci']['lower']
- cover_upper = theta <= dml_plr.sensitivity_params['ci']['upper']
- rv = dml_plr.sensitivity_params['rv']
- rva = dml_plr.sensitivity_params['rva']
- bias_lower = abs(theta - dml_plr.sensitivity_params['theta']['lower'])
- bias_upper = abs(theta - dml_plr.sensitivity_params['theta']['upper'])
-
- df_results_detailed = pd.concat(
- (df_results_detailed,
- pd.DataFrame({
- "Coverage": coverage.astype(int),
- "CI Length": confint.iloc[0, 1] - confint.iloc[0, 0],
- "Bias": abs(estimate - theta),
- "Coverage (Lower)": cover_lower.astype(int),
- "Coverage (Upper)": cover_upper.astype(int),
- "RV": rv,
- "RVa": rva,
- "Bias (Lower)": bias_lower,
- "Bias (Upper)": bias_upper,
- "score": score,
- "Learner g": learner_g_name,
- "Learner m": learner_m_name,
- "level": level,
- "repetition": i_rep}, index=[0])),
- ignore_index=True)
-
-df_results = df_results_detailed.groupby(
- ["Learner g", "Learner m", "score", "level"]).agg(
- {"Coverage": "mean",
- "CI Length": "mean",
- "Bias": "mean",
- "Coverage (Lower)": "mean",
- "Coverage (Upper)": "mean",
- "RV": "mean",
- "RVa": "mean",
- "Bias (Lower)": "mean",
- "Bias (Upper)": "mean",
- "repetition": "count"}
- ).reset_index()
-print(df_results)
-
-end_time = time.time()
-total_runtime = end_time - start_time
-
-# save results
-script_name = "plr_ate_sensitivity.py"
-path = "results/plm/plr_ate_sensitivity"
-
-metadata = pd.DataFrame({
- 'DoubleML Version': [dml.__version__],
- 'Script': [script_name],
- 'Date': [datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
- 'Total Runtime (seconds)': [total_runtime],
- 'Python Version': [f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"],
-})
-print(metadata)
-
-df_results.to_csv(f"{path}.csv", index=False)
-metadata.to_csv(f"{path}_metadata.csv", index=False)
+from montecover.plm import PLRATESensitivityCoverageSimulation
+
+# Create and run simulation with config file
+sim = PLRATESensitivityCoverageSimulation(
+ config_file="scripts/plm/plr_ate_sensitivity_config.yml",
+ log_level="INFO",
+ log_file="logs/plm/plr_ate_sensitivity_sim.log",
+)
+sim.run_simulation()
+sim.save_results(output_path="results/plm/", file_prefix="plr_ate_sensitivity")
+
+# Save config file for reproducibility
+sim.save_config("results/plm/plr_ate_sensitivity_config.yml")
diff --git a/scripts/plm/plr_ate_sensitivity_config.yml b/scripts/plm/plr_ate_sensitivity_config.yml
new file mode 100644
index 0000000..1d348f3
--- /dev/null
+++ b/scripts/plm/plr_ate_sensitivity_config.yml
@@ -0,0 +1,53 @@
+# Simulation parameters for PLR ATE Sensitivity Coverage
+
+simulation_parameters:
+ repetitions: 1000
+ max_runtime: 19800 # 5.5 hours in seconds
+ random_seed: 42
+ n_jobs: -2
+
+dgp_parameters:
+ theta: [0.5] # Treatment effect
+ n_obs: [1000] # Sample size
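+  # Note: the confounding strengths (cf_y, cf_d) of the confounded PLR DGP are
+  # not configurable here; they are assumed to be fixed inside the simulation
+  # class.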
+
+# Define reusable learner configurations
+learner_definitions:
+ lasso: &lasso
+ name: "LassoCV"
+
+ rf: &rf
+ name: "RF Regr."
+ params:
+ n_estimators: 200
+ max_features: 10
+ max_depth: 5
+ min_samples_leaf: 2
+
+ lgbm: &lgbm
+ name: "LGBM Regr."
+ params:
+ n_estimators: 500
+ learning_rate: 0.05
+ min_child_samples: 5
+
+dml_parameters:
+ learners:
+ - ml_g: *lasso
+ ml_m: *lasso
+ - ml_g: *rf
+ ml_m: *rf
+ - ml_g: *lasso
+ ml_m: *rf
+ - ml_g: *rf
+ ml_m: *lasso
+ - ml_g: *lgbm
+ ml_m: *lgbm
+ - ml_g: *lgbm
+ ml_m: *lasso
+ - ml_g: *lasso
+ ml_m: *lgbm
+
+ score: ["partialling out", "IV-type"]
+
+confidence_parameters:
+ level: [0.95, 0.90] # Confidence levels
diff --git a/scripts/plm/plr_cate.py b/scripts/plm/plr_cate.py
new file mode 100644
index 0000000..f9f1f14
--- /dev/null
+++ b/scripts/plm/plr_cate.py
@@ -0,0 +1,13 @@
+from montecover.plm import PLRCATECoverageSimulation
+
+# Create and run simulation with config file
+sim = PLRCATECoverageSimulation(
+ config_file="scripts/plm/plr_cate_config.yml",
+ log_level="INFO",
+ log_file="logs/plm/plr_cate_sim.log",
+)
+sim.run_simulation()
+sim.save_results(output_path="results/plm/", file_prefix="plr_cate")
+
+# Save config file for reproducibility
+sim.save_config("results/plm/plr_cate_config.yml")
diff --git a/scripts/plm/plr_cate_config.yml b/scripts/plm/plr_cate_config.yml
new file mode 100644
index 0000000..05194b1
--- /dev/null
+++ b/scripts/plm/plr_cate_config.yml
@@ -0,0 +1,54 @@
+# Simulation parameters for PLR CATE Coverage
+
+simulation_parameters:
+ repetitions: 1000
+ max_runtime: 19800 # 5.5 hours in seconds
+ random_seed: 42
+ n_jobs: -2
+
+dgp_parameters:
+ n_obs: [500] # Sample size
+ p: [10] # Number of covariates
+ support_size: [5] # Number of non-zero coefficients
+ n_x: [1]
+
+# Define reusable learner configurations
+learner_definitions:
+ lasso: &lasso
+ name: "LassoCV"
+
+ rf: &rf
+ name: "RF Regr."
+ params:
+ n_estimators: 200
+ max_features: 10
+ max_depth: 5
+ min_samples_leaf: 2
+
+ lgbm: &lgbm
+ name: "LGBM Regr."
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+
+dml_parameters:
+ learners:
+ - ml_g: *lasso
+ ml_m: *lasso
+ - ml_g: *rf
+ ml_m: *rf
+ - ml_g: *lasso
+ ml_m: *rf
+ - ml_g: *rf
+ ml_m: *lasso
+ - ml_g: *lgbm
+ ml_m: *lgbm
+ - ml_g: *lgbm
+ ml_m: *lasso
+ - ml_g: *lasso
+ ml_m: *lgbm
+
+ score: ["partialling out", "IV-type"]
+
+confidence_parameters:
+ level: [0.95, 0.90] # Confidence levels
diff --git a/scripts/plm/plr_cate_coverage.py b/scripts/plm/plr_cate_coverage.py
deleted file mode 100644
index 5fee83d..0000000
--- a/scripts/plm/plr_cate_coverage.py
+++ /dev/null
@@ -1,127 +0,0 @@
-import numpy as np
-import pandas as pd
-from datetime import datetime
-import time
-import sys
-import patsy
-
-from lightgbm import LGBMRegressor
-from sklearn.linear_model import LassoCV
-
-import doubleml as dml
-from doubleml.datasets import make_heterogeneous_data
-
-# Number of repetitions
-n_rep = 1000
-max_runtime = 5.5 * 3600 # 5.5 hours in seconds
-
-# DGP pars
-n_obs = 2000
-p = 10
-support_size = 5
-n_x = 1
-
-# to get the best possible comparison between different learners (and settings) we first simulate all datasets
-np.random.seed(42)
-datasets = []
-for i in range(n_rep):
- data = make_heterogeneous_data(n_obs=n_obs, p=p, support_size=support_size, n_x=n_x, binary_treatment=False)
- datasets.append(data)
-
-# set up hyperparameters
-hyperparam_dict = {
- "learner_g": [("Lasso", LassoCV()),
- ("LGBM", LGBMRegressor(n_estimators=200, learning_rate=0.05, verbose=-1))],
- "learner_m": [("Lasso", LassoCV()),
- ("LGBM", LGBMRegressor(n_estimators=200, learning_rate=0.05, verbose=-1))],
- "level": [0.95, 0.90]
-}
-
-# set up the results dataframe
-df_results_detailed = pd.DataFrame()
-
-# start simulation
-np.random.seed(42)
-start_time = time.time()
-
-for i_rep in range(n_rep):
- print(f"Repetition: {i_rep}/{n_rep}", end="\r")
-
- # Check the elapsed time
- elapsed_time = time.time() - start_time
- if elapsed_time > max_runtime:
- print("Maximum runtime exceeded. Stopping the simulation.")
- break
-
- # define the DoubleML data object
- data = datasets[i_rep]['data']
- design_matrix = patsy.dmatrix("bs(x, df=5, degree=2)", {"x": data["X_0"]})
- spline_basis = pd.DataFrame(design_matrix)
-
- true_effects = datasets[i_rep]['effects']
-
- obj_dml_data = dml.DoubleMLData(data, 'y', 'd')
-
- for learner_g_idx, (learner_g_name, ml_g) in enumerate(hyperparam_dict["learner_g"]):
- for learner_m_idx, (learner_m_name, ml_m) in enumerate(hyperparam_dict["learner_m"]):
- # Set machine learning methods for g & m
- dml_plr = dml.DoubleMLPLR(
- obj_dml_data=obj_dml_data,
- ml_l=ml_g,
- ml_m=ml_m,
- )
- dml_plr.fit(n_jobs_cv=5)
- cate = dml_plr.cate(spline_basis)
-
- for level_idx, level in enumerate(hyperparam_dict["level"]):
- confint = cate.confint(basis=spline_basis, level=level)
- effects = confint["effect"]
- coverage = (confint.iloc[:, 0] < true_effects) & (true_effects < confint.iloc[:, 2])
- ci_length = confint.iloc[:, 2] - confint.iloc[:, 0]
- confint_uniform = cate.confint(basis=spline_basis, level=0.95, joint=True, n_rep_boot=2000)
- coverage_uniform = all((confint_uniform.iloc[:, 0] < true_effects) &
- (true_effects < confint_uniform.iloc[:, 2]))
- ci_length_uniform = confint_uniform.iloc[:, 2] - confint_uniform.iloc[:, 0]
- df_results_detailed = pd.concat(
- (df_results_detailed,
- pd.DataFrame({
- "Coverage": coverage.mean(),
- "CI Length": ci_length.mean(),
- "Bias": abs(effects - true_effects).mean(),
- "Uniform Coverage": coverage_uniform,
- "Uniform CI Length": ci_length_uniform.mean(),
- "Learner g": learner_g_name,
- "Learner m": learner_m_name,
- "level": level,
- "repetition": i_rep}, index=[0])),
- ignore_index=True)
-
-df_results = df_results_detailed.groupby(
- ["Learner g", "Learner m", "level"]).agg(
- {"Coverage": "mean",
- "CI Length": "mean",
- "Bias": "mean",
- "Uniform Coverage": "mean",
- "Uniform CI Length": "mean",
- "repetition": "count"}
- ).reset_index()
-print(df_results)
-
-end_time = time.time()
-total_runtime = end_time - start_time
-
-# save results
-script_name = "plr_cate_coverage.py"
-path = "results/plm/plr_cate_coverage"
-
-metadata = pd.DataFrame({
- 'DoubleML Version': [dml.__version__],
- 'Script': [script_name],
- 'Date': [datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
- 'Total Runtime (seconds)': [total_runtime],
- 'Python Version': [f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"],
-})
-print(metadata)
-
-df_results.to_csv(f"{path}.csv", index=False)
-metadata.to_csv(f"{path}_metadata.csv", index=False)
diff --git a/scripts/plm/plr_gate.py b/scripts/plm/plr_gate.py
new file mode 100644
index 0000000..75d133f
--- /dev/null
+++ b/scripts/plm/plr_gate.py
@@ -0,0 +1,13 @@
+from montecover.plm import PLRGATECoverageSimulation
+
+# Create and run simulation with config file
+sim = PLRGATECoverageSimulation(
+ config_file="scripts/plm/plr_gate_config.yml",
+ log_level="INFO",
+ log_file="logs/plm/plr_gate_sim.log",
+)
+sim.run_simulation()
+sim.save_results(output_path="results/plm/", file_prefix="plr_gate")
+
+# Save config file for reproducibility
+sim.save_config("results/plm/plr_gate_config.yml")
diff --git a/scripts/plm/plr_gate_config.yml b/scripts/plm/plr_gate_config.yml
new file mode 100644
index 0000000..81eaee4
--- /dev/null
+++ b/scripts/plm/plr_gate_config.yml
@@ -0,0 +1,54 @@
+# Simulation parameters for PLR GATE Coverage
+
+simulation_parameters:
+ repetitions: 1000
+ max_runtime: 19800 # 5.5 hours in seconds
+ random_seed: 42
+ n_jobs: -2
+
+dgp_parameters:
+ n_obs: [500] # Sample size
+ p: [10] # Number of covariates
+ support_size: [5] # Number of non-zero coefficients
+ n_x: [1]
+
+# Define reusable learner configurations
+learner_definitions:
+ lasso: &lasso
+ name: "LassoCV"
+
+ rf: &rf
+ name: "RF Regr."
+ params:
+ n_estimators: 200
+ max_features: 10
+ max_depth: 5
+ min_samples_leaf: 2
+
+ lgbm: &lgbm
+ name: "LGBM Regr."
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+
+dml_parameters:
+ learners:
+ - ml_g: *lasso
+ ml_m: *lasso
+ - ml_g: *rf
+ ml_m: *rf
+ - ml_g: *lasso
+ ml_m: *rf
+ - ml_g: *rf
+ ml_m: *lasso
+ - ml_g: *lgbm
+ ml_m: *lgbm
+ - ml_g: *lgbm
+ ml_m: *lasso
+ - ml_g: *lasso
+ ml_m: *lgbm
+
+ score: ["partialling out", "IV-type"]
+
+confidence_parameters:
+ level: [0.95, 0.90] # Confidence levels
diff --git a/scripts/plm/plr_gate_coverage.py b/scripts/plm/plr_gate_coverage.py
deleted file mode 100644
index 7c99669..0000000
--- a/scripts/plm/plr_gate_coverage.py
+++ /dev/null
@@ -1,130 +0,0 @@
-import numpy as np
-import pandas as pd
-from datetime import datetime
-import time
-import sys
-
-from lightgbm import LGBMRegressor
-from sklearn.linear_model import LassoCV
-
-import doubleml as dml
-from doubleml.datasets import make_heterogeneous_data
-
-# Number of repetitions
-n_rep = 1000
-max_runtime = 5.5 * 3600 # 5.5 hours in seconds
-
-# DGP pars
-n_obs = 500
-p = 10
-support_size = 5
-n_x = 1
-
-# to get the best possible comparison between different learners (and settings) we first simulate all datasets
-np.random.seed(42)
-datasets = []
-for i in range(n_rep):
- data = make_heterogeneous_data(n_obs=n_obs, p=p, support_size=support_size, n_x=n_x, binary_treatment=False)
- datasets.append(data)
-
-# set up hyperparameters
-hyperparam_dict = {
- "learner_g": [("Lasso", LassoCV()),
- ("LGBM", LGBMRegressor(n_estimators=200, learning_rate=0.05, verbose=-1))],
- "learner_m": [("Lasso", LassoCV()),
- ("LGBM", LGBMRegressor(n_estimators=200, learning_rate=0.05, verbose=-1))],
- "level": [0.95, 0.90]
-}
-
-# set up the results dataframe
-df_results_detailed = pd.DataFrame()
-
-# start simulation
-np.random.seed(42)
-start_time = time.time()
-
-for i_rep in range(n_rep):
- print(f"Repetition: {i_rep}/{n_rep}", end="\r")
-
- # Check the elapsed time
- elapsed_time = time.time() - start_time
- if elapsed_time > max_runtime:
- print("Maximum runtime exceeded. Stopping the simulation.")
- break
-
- # define the DoubleML data object
- data = datasets[i_rep]['data']
- ite = datasets[i_rep]['effects']
-
- groups = pd.DataFrame(
- np.column_stack((data['X_0'] <= 0.3,
- (data['X_0'] > 0.3) & (data['X_0'] <= 0.7),
- data['X_0'] > 0.7)),
- columns=['Group 1', 'Group 2', 'Group 3'])
- true_effects = [ite[groups[group]].mean() for group in groups.columns]
-
- obj_dml_data = dml.DoubleMLData(data, 'y', 'd')
-
- for learner_g_idx, (learner_g_name, ml_g) in enumerate(hyperparam_dict["learner_g"]):
- for learner_m_idx, (learner_m_name, ml_m) in enumerate(hyperparam_dict["learner_m"]):
- # Set machine learning methods for g & m
- dml_plr = dml.DoubleMLPLR(
- obj_dml_data=obj_dml_data,
- ml_l=ml_g,
- ml_m=ml_m,
- )
- dml_plr.fit(n_jobs_cv=5)
- gate = dml_plr.gate(groups=groups)
-
- for level_idx, level in enumerate(hyperparam_dict["level"]):
- confint = gate.confint(level=level)
- effects = confint["effect"]
- coverage = (confint.iloc[:, 0] < true_effects) & (true_effects < confint.iloc[:, 2])
- ci_length = confint.iloc[:, 2] - confint.iloc[:, 0]
- confint_uniform = gate.confint(level=0.95, joint=True, n_rep_boot=2000)
- coverage_uniform = all((confint_uniform.iloc[:, 0] < true_effects) &
- (true_effects < confint_uniform.iloc[:, 2]))
- ci_length_uniform = confint_uniform.iloc[:, 2] - confint_uniform.iloc[:, 0]
- df_results_detailed = pd.concat(
- (df_results_detailed,
- pd.DataFrame({
- "Coverage": coverage.mean(),
- "CI Length": ci_length.mean(),
- "Bias": abs(effects - true_effects).mean(),
- "Uniform Coverage": coverage_uniform,
- "Uniform CI Length": ci_length_uniform.mean(),
- "Learner g": learner_g_name,
- "Learner m": learner_m_name,
- "level": level,
- "repetition": i_rep}, index=[0])),
- ignore_index=True)
-
-df_results = df_results_detailed.groupby(
- ["Learner g", "Learner m", "level"]).agg(
- {"Coverage": "mean",
- "CI Length": "mean",
- "Bias": "mean",
- "Uniform Coverage": "mean",
- "Uniform CI Length": "mean",
- "repetition": "count"}
- ).reset_index()
-print(df_results)
-
-end_time = time.time()
-total_runtime = end_time - start_time
-
-# save results
-script_name = "plr_gate_coverage.py"
-path = "results/plm/plr_gate_coverage"
-
-metadata = pd.DataFrame({
- 'DoubleML Version': [dml.__version__],
- 'Script': [script_name],
- 'Date': [datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
- 'Total Runtime (seconds)': [total_runtime],
- 'Python Version': [f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"],
-})
-print(metadata)
-
-df_results.to_csv(f"{path}.csv", index=False)
-metadata.to_csv(f"{path}_metadata.csv", index=False)
diff --git a/scripts/rdd/rdd_fuzzy.py b/scripts/rdd/rdd_fuzzy.py
new file mode 100644
index 0000000..3fec90e
--- /dev/null
+++ b/scripts/rdd/rdd_fuzzy.py
@@ -0,0 +1,13 @@
+from montecover.rdd import RDDCoverageSimulation
+
+# Create and run simulation with config file
+sim = RDDCoverageSimulation(
+ config_file="scripts/rdd/rdd_fuzzy_config.yml",
+ log_level="INFO",
+ log_file="logs/rdd/rdd_fuzzy_sim.log",
+)
+sim.run_simulation()
+sim.save_results(output_path="results/rdd/", file_prefix="rdd_fuzzy")
+
+# Save config file for reproducibility
+sim.save_config("results/rdd/rdd_fuzzy_config.yml")
diff --git a/scripts/rdd/rdd_fuzzy_config.yml b/scripts/rdd/rdd_fuzzy_config.yml
new file mode 100644
index 0000000..2e9cdc7
--- /dev/null
+++ b/scripts/rdd/rdd_fuzzy_config.yml
@@ -0,0 +1,70 @@
+# Simulation parameters for fuzzy RDD Coverage
+
+simulation_parameters:
+ repetitions: 1000
+ max_runtime: 19800 # 5.5 hours in seconds
+ random_seed: 42
+ n_jobs: -2
+
+dgp_parameters:
+ n_obs: [2000] # Sample size
+ fuzzy: [True]
+ cutoff: [0.0]
+
+# Define reusable learner configurations
+learner_definitions:
+ lgbmr: &lgbmr
+ name: "LGBM Regr."
+ params:
+ n_estimators: 200
+ learning_rate: 0.02
+ max_depth: 5
+
+ lgbmc: &lgbmc
+ name: "LGBM Clas."
+ params:
+ n_estimators: 200
+ learning_rate: 0.02
+ max_depth: 5
+
+ global_linear: &global_linear
+ name: "Global Linear"
+
+ global_logistic: &global_logistic
+ name: "Global Logistic"
+
+ local_linear: &local_linear
+ name: "Linear"
+
+ local_logistic: &local_logistic
+ name: "Logistic"
+
+ stacked_reg: &stacked_reg
+ name: "Stacked Regr."
+ params:
+ n_estimators: 200
+ learning_rate: 0.02
+ max_depth: 5
+
+ stacked_cls: &stacked_cls
+ name: "Stacked Clas."
+ params:
+ n_estimators: 200
+ learning_rate: 0.02
+ max_depth: 5
+
+dml_parameters:
+ fs_specification: ["cutoff", "cutoff and score", "interacted cutoff and score"]
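+  # Assumed semantics of fs_specification: it selects which terms (cutoff
+  # indicator, score, and their interaction) enter the first-stage nuisance
+  # fits of doubleml.rdd.RDFlex.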
+
+ learners:
+ - ml_g: *lgbmr
+ ml_m: *lgbmc
+ - ml_g: *global_linear
+ ml_m: *global_logistic
+ - ml_g: *local_linear
+ ml_m: *local_logistic
+ - ml_g: *stacked_reg
+ ml_m: *stacked_cls
+
+confidence_parameters:
+ level: [0.95, 0.90] # Confidence levels
diff --git a/scripts/rdd/rdd_fuzzy_coverage.py b/scripts/rdd/rdd_fuzzy_coverage.py
deleted file mode 100644
index f9773fa..0000000
--- a/scripts/rdd/rdd_fuzzy_coverage.py
+++ /dev/null
@@ -1,177 +0,0 @@
-import numpy as np
-import pandas as pd
-from datetime import datetime
-import time
-import sys
-
-from lightgbm import LGBMRegressor, LGBMClassifier
-from sklearn.ensemble import StackingRegressor, StackingClassifier
-from sklearn.linear_model import LinearRegression, Ridge, LogisticRegression
-from rdrobust import rdrobust
-
-import doubleml as dml
-from doubleml.rdd import RDFlex
-from doubleml.rdd.datasets import make_simple_rdd_data
-from doubleml.utils import GlobalRegressor, GlobalClassifier
-
-from statsmodels.nonparametric.kernel_regression import KernelReg
-
-
-# Number of repetitions
-n_rep = 500
-max_runtime = 5.5 * 3600 # 5.5 hours in seconds
-
-# DGP pars
-n_obs = 2000
-cutoff = 0
-
-# to get the best possible comparison between different learners (and settings) we first simulate all datasets
-np.random.seed(42)
-
-datasets = []
-for i in range(n_rep):
- data = make_simple_rdd_data(n_obs=n_obs, fuzzy=True, cutoff=cutoff)
- datasets.append(data)
-
-# set up hyperparameters
-hyperparam_dict = {
- "fs_specification": ["cutoff", "cutoff and score", "interacted cutoff and score"],
- "learner_g": [
- ("Linear", LinearRegression()),
- ("LGBM", LGBMRegressor(n_estimators=100, max_depth=5, learning_rate=0.1, verbose=-1)),
- ("Global linear", GlobalRegressor(LinearRegression())),
- ("Stacked", StackingRegressor(
- estimators=[
- ('lr', LinearRegression()),
- ('lgbm', LGBMRegressor(n_estimators=100, max_depth=5, learning_rate=0.1, verbose=-1)),
- ('glr', GlobalRegressor(LinearRegression()))],
- final_estimator=Ridge()))],
- "learner_m": [
- ("Linear", LogisticRegression()),
- ("LGBM", LGBMClassifier(n_estimators=100, max_depth=5, learning_rate=0.1, verbose=-1)),
- ("Global linear", GlobalClassifier(LogisticRegression())),
- ("Stacked", StackingClassifier(
- estimators=[
- ('lr', LogisticRegression()),
- ('lgbm', LGBMClassifier(n_estimators=100, max_depth=5, learning_rate=0.1, verbose=-1)),
- ('glr', GlobalClassifier(LogisticRegression()))],
- final_estimator=LogisticRegression()))],
- "level": [0.95, 0.90]}
-
-# set up the results dataframe
-df_results_detailed = pd.DataFrame()
-
-# start simulation
-np.random.seed(42)
-start_time = time.time()
-
-for i_rep in range(n_rep):
- print(f"Repetition: {i_rep}/{n_rep}", end="\r")
-
- # Check the elapsed time
- elapsed_time = time.time() - start_time
- if elapsed_time > max_runtime:
- print("Maximum runtime exceeded. Stopping the simulation.")
- break
-
- data = datasets[i_rep]
- # get oracle value
- score = data["score"]
- complier_mask = (((data["D"] == 0) & (data["score"] < cutoff)) | ((data["D"] == 1) & (data["score"] > cutoff)))
-
- ite = data["oracle_values"]['Y1'] - data["oracle_values"]['Y0']
- kernel_reg = KernelReg(endog=ite[complier_mask], exog=score[complier_mask], var_type='c', reg_type='ll')
- effect_at_cutoff, _ = kernel_reg.fit(np.array([cutoff]))
- oracle_effect = effect_at_cutoff[0]
-
- Y = data["Y"]
- Z = data["X"].reshape(n_obs, -1)
- D = data["D"]
-
- # baseline
- for level_idx, level in enumerate(hyperparam_dict["level"]):
- res = rdrobust(y=Y, x=score, fuzzy=D, covs=Z, c=cutoff, level=level*100)
- coef = res.coef.loc["Robust", "Coeff"]
- ci_lower = res.ci.loc["Robust", "CI Lower"]
- ci_upper = res.ci.loc["Robust", "CI Upper"]
-
- coverage = (ci_lower < oracle_effect) & (oracle_effect < ci_upper)
- ci_length = ci_upper - ci_lower
-
- df_results_detailed = pd.concat(
- (df_results_detailed,
- pd.DataFrame({
- "Coverage": coverage.astype(int),
- "CI Length": ci_length,
- "Bias": abs(coef - oracle_effect),
- "Learner g": "linear",
- "Learner m": "linear",
- "Method": "rdrobust",
- "fs specification": "cutoff",
- "level": level,
- "repetition": i_rep}, index=[0])),
- ignore_index=True)
-
- # define the DoubleML data object
- obj_dml_data = dml.DoubleMLData.from_arrays(y=Y, d=D, x=Z, s=score)
-
- for learner_g_idx, (learner_g_name, ml_g) in enumerate(hyperparam_dict["learner_g"]):
- for learner_m_idx, (learner_m_name, ml_m) in enumerate(hyperparam_dict["learner_m"]):
- for fs_specification_idx, fs_specification in enumerate(hyperparam_dict["fs_specification"]):
- rdflex_model = RDFlex(
- obj_dml_data,
- ml_g=ml_g,
- ml_m=ml_m,
- n_folds=5,
- n_rep=1,
- cutoff=cutoff,
- fuzzy=True,
- fs_specification=fs_specification)
- rdflex_model.fit(n_iterations=2)
-
- for level_idx, level in enumerate(hyperparam_dict["level"]):
- confint = rdflex_model.confint(level=level)
- coverage = (confint.iloc[2, 0] < oracle_effect) & (oracle_effect < confint.iloc[2, 1])
- ci_length = confint.iloc[2, 1] - confint.iloc[2, 0]
-
- df_results_detailed = pd.concat(
- (df_results_detailed,
- pd.DataFrame({
- "Coverage": coverage.astype(int),
- "CI Length": ci_length,
- "Bias": abs(rdflex_model.coef[2] - oracle_effect),
- "Learner g": learner_g_name,
- "Learner m": learner_m_name,
- "Method": "rdflex",
- "fs specification": fs_specification,
- "level": level,
- "repetition": i_rep}, index=[0])),
- ignore_index=True)
-
-df_results = df_results_detailed.groupby(
- ["Method", "fs specification", "Learner g", "Learner m", "level"]).agg(
- {"Coverage": "mean",
- "CI Length": "mean",
- "Bias": "mean",
- "repetition": "count"}
- ).reset_index()
-print(df_results)
-
-end_time = time.time()
-total_runtime = end_time - start_time
-
-# save results
-script_name = "rdd_fuzzy_coverage.py"
-path = "results/rdd/rdd_fuzzy_coverage"
-
-metadata = pd.DataFrame({
- 'DoubleML Version': [dml.__version__],
- 'Script': [script_name],
- 'Date': [datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
- 'Total Runtime (seconds)': [total_runtime],
- 'Python Version': [f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"],
-})
-print(metadata)
-
-df_results.to_csv(f"{path}.csv", index=False)
-metadata.to_csv(f"{path}_metadata.csv", index=False)
diff --git a/scripts/rdd/rdd_sharp.py b/scripts/rdd/rdd_sharp.py
new file mode 100644
index 0000000..3520881
--- /dev/null
+++ b/scripts/rdd/rdd_sharp.py
@@ -0,0 +1,13 @@
+from montecover.rdd import RDDCoverageSimulation
+
+# Create and run simulation with config file
+sim = RDDCoverageSimulation(
+ config_file="scripts/rdd/rdd_sharp_config.yml",
+ log_level="INFO",
+ log_file="logs/rdd/rdd_sharp_sim.log",
+)
+sim.run_simulation()
+sim.save_results(output_path="results/rdd/", file_prefix="rdd_sharp")
+
+# Save config file for reproducibility
+sim.save_config("results/rdd/rdd_sharp_config.yml")
diff --git a/scripts/rdd/rdd_sharp_config.yml b/scripts/rdd/rdd_sharp_config.yml
new file mode 100644
index 0000000..560c913
--- /dev/null
+++ b/scripts/rdd/rdd_sharp_config.yml
@@ -0,0 +1,45 @@
+# Simulation parameters for sharp RDD Coverage
+
+simulation_parameters:
+ repetitions: 1000
+ max_runtime: 19800 # 5.5 hours in seconds
+ random_seed: 42
+ n_jobs: -2
+
+dgp_parameters:
+ n_obs: [1000] # Sample size
+ fuzzy: [False]
+ cutoff: [0.0]
+
+# Define reusable learner configurations
+learner_definitions:
+ lgbmr: &lgbmr
+ name: "LGBM Regr."
+ params:
+ n_estimators: 100
+ learning_rate: 0.05
+
+ global_linear: &global_linear
+ name: "Global Linear"
+
+ local_linear: &local_linear
+ name: "Linear"
+
+ stacked_reg: &stacked_reg
+ name: "Stacked Regr."
+ params:
+ n_estimators: 100
+ learning_rate: 0.05
+
+dml_parameters:
+ fs_specification: ["cutoff", "cutoff and score", "interacted cutoff and score"]
+
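+  # Sharp design (fuzzy: [False] above): treatment is a deterministic function
+  # of the score at the cutoff, so only ml_g is specified per learner and no
+  # propensity learner ml_m is needed.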
+ learners:
+ - ml_g: *lgbmr
+ - ml_g: *global_linear
+ - ml_g: *local_linear
+ - ml_g: *stacked_reg
+
+confidence_parameters:
+ level: [0.95, 0.90] # Confidence levels
diff --git a/scripts/rdd/rdd_sharp_coverage.py b/scripts/rdd/rdd_sharp_coverage.py
deleted file mode 100644
index bff96f9..0000000
--- a/scripts/rdd/rdd_sharp_coverage.py
+++ /dev/null
@@ -1,162 +0,0 @@
-import numpy as np
-import pandas as pd
-from datetime import datetime
-import time
-import sys
-
-from lightgbm import LGBMRegressor
-from sklearn.ensemble import StackingRegressor
-from sklearn.linear_model import LinearRegression, Ridge
-from rdrobust import rdrobust
-
-import doubleml as dml
-from doubleml.rdd import RDFlex
-from doubleml.rdd.datasets import make_simple_rdd_data
-from doubleml.utils import GlobalRegressor
-
-from statsmodels.nonparametric.kernel_regression import KernelReg
-
-
-# Number of repetitions
-n_rep = 500
-max_runtime = 5.5 * 3600 # 5.5 hours in seconds
-
-# DGP pars
-n_obs = 1000
-cutoff = 0
-
-# to get the best possible comparison between different learners (and settings) we first simulate all datasets
-np.random.seed(42)
-
-datasets = []
-for i in range(n_rep):
- data = make_simple_rdd_data(n_obs=n_obs, fuzzy=False, cutoff=cutoff)
- datasets.append(data)
-
-# set up hyperparameters
-hyperparam_dict = {
- "fs_specification": ["cutoff", "cutoff and score", "interacted cutoff and score"],
- "learner_g": [
- ("Linear", LinearRegression()),
- ("LGBM", LGBMRegressor(n_estimators=100, max_depth=5, learning_rate=0.1, verbose=-1)),
- ("Global linear", GlobalRegressor(LinearRegression())),
- ("Stacked", StackingRegressor(
- estimators=[
- ('lr', LinearRegression()),
- ('lgbm', LGBMRegressor(n_estimators=100, max_depth=5, learning_rate=0.1, verbose=-1)),
- ('glr', GlobalRegressor(LinearRegression()))],
- final_estimator=Ridge()))],
- "level": [0.95, 0.90]}
-
-# set up the results dataframe
-df_results_detailed = pd.DataFrame()
-
-# start simulation
-np.random.seed(42)
-start_time = time.time()
-
-for i_rep in range(n_rep):
- print(f"Repetition: {i_rep}/{n_rep}", end="\r")
-
- # Check the elapsed time
- elapsed_time = time.time() - start_time
- if elapsed_time > max_runtime:
- print("Maximum runtime exceeded. Stopping the simulation.")
- break
-
- data = datasets[i_rep]
- # get oracle value
- score = data["score"]
- ite = data["oracle_values"]['Y1'] - data["oracle_values"]['Y0']
-
- kernel_reg = KernelReg(endog=ite, exog=score, var_type='c', reg_type='ll')
- effect_at_cutoff, _ = kernel_reg.fit(np.array([cutoff]))
- oracle_effect = effect_at_cutoff[0]
-
- Y = data["Y"]
- Z = data["X"].reshape(n_obs, -1)
- D = data["D"]
-
- # baseline
- for level_idx, level in enumerate(hyperparam_dict["level"]):
- res = rdrobust(y=Y, x=score, covs=Z, c=cutoff, level=level*100)
- coef = res.coef.loc["Robust", "Coeff"]
- ci_lower = res.ci.loc["Robust", "CI Lower"]
- ci_upper = res.ci.loc["Robust", "CI Upper"]
-
- coverage = (ci_lower < oracle_effect) & (oracle_effect < ci_upper)
- ci_length = ci_upper - ci_lower
-
- df_results_detailed = pd.concat(
- (df_results_detailed,
- pd.DataFrame({
- "Coverage": coverage.astype(int),
- "CI Length": ci_length,
- "Bias": abs(coef - oracle_effect),
- "Learner g": "linear",
- "Method": "rdrobust",
- "fs specification": "cutoff",
- "level": level,
- "repetition": i_rep}, index=[0])),
- ignore_index=True)
-
- # define the DoubleML data object
- obj_dml_data = dml.DoubleMLData.from_arrays(y=Y, d=D, x=Z, s=score)
-
- for learner_g_idx, (learner_g_name, ml_g) in enumerate(hyperparam_dict["learner_g"]):
- for fs_specification_idx, fs_specification in enumerate(hyperparam_dict["fs_specification"]):
- rdflex_model = RDFlex(
- obj_dml_data,
- ml_g=ml_g,
- n_folds=5,
- n_rep=1,
- cutoff=cutoff,
- fuzzy=False,
- fs_specification=fs_specification)
- rdflex_model.fit(n_iterations=2)
-
- for level_idx, level in enumerate(hyperparam_dict["level"]):
- confint = rdflex_model.confint(level=level)
- coverage = (confint.iloc[2, 0] < oracle_effect) & (oracle_effect < confint.iloc[2, 1])
- ci_length = confint.iloc[2, 1] - confint.iloc[2, 0]
-
- df_results_detailed = pd.concat(
- (df_results_detailed,
- pd.DataFrame({
- "Coverage": coverage.astype(int),
- "CI Length": ci_length,
- "Bias": abs(rdflex_model.coef[2] - oracle_effect),
- "Learner g": learner_g_name,
- "Method": "rdflex",
- "fs specification": fs_specification,
- "level": level,
- "repetition": i_rep}, index=[0])),
- ignore_index=True)
-
-df_results = df_results_detailed.groupby(
- ["Method", "fs specification", "Learner g", "level"]).agg(
- {"Coverage": "mean",
- "CI Length": "mean",
- "Bias": "mean",
- "repetition": "count"}
- ).reset_index()
-print(df_results)
-
-end_time = time.time()
-total_runtime = end_time - start_time
-
-# save results
-script_name = "rdd_sharp_coverage.py"
-path = "results/rdd/rdd_sharp_coverage"
-
-metadata = pd.DataFrame({
- 'DoubleML Version': [dml.__version__],
- 'Script': [script_name],
- 'Date': [datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
- 'Total Runtime (seconds)': [total_runtime],
- 'Python Version': [f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"],
-})
-print(metadata)
-
-df_results.to_csv(f"{path}.csv", index=False)
-metadata.to_csv(f"{path}_metadata.csv", index=False)
diff --git a/scripts/ssm/ssm_mar_ate.py b/scripts/ssm/ssm_mar_ate.py
new file mode 100644
index 0000000..d22c31c
--- /dev/null
+++ b/scripts/ssm/ssm_mar_ate.py
@@ -0,0 +1,13 @@
+from montecover.ssm import SSMMarATECoverageSimulation
+
+# Create and run simulation with config file
+sim = SSMMarATECoverageSimulation(
+ config_file="scripts/ssm/ssm_mar_ate_config.yml",
+ log_level="INFO",
+ log_file="logs/ssm/ssm_mar_ate_sim.log",
+)
+sim.run_simulation()
+sim.save_results(output_path="results/ssm/", file_prefix="ssm_mar_ate")
+
+# Save config file for reproducibility
+sim.save_config("results/ssm/ssm_mar_ate_config.yml")
diff --git a/scripts/ssm/ssm_mar_ate_config.yml b/scripts/ssm/ssm_mar_ate_config.yml
new file mode 100644
index 0000000..ca85751
--- /dev/null
+++ b/scripts/ssm/ssm_mar_ate_config.yml
@@ -0,0 +1,82 @@
+# Simulation parameters for SSM MAR ATE Coverage
+
+simulation_parameters:
+ repetitions: 1000
+ max_runtime: 19800 # 5.5 hours in seconds
+ random_seed: 42
+ n_jobs: -2
+
+dgp_parameters:
+ theta: [1.0] # Treatment effect
+ n_obs: [500] # Sample size
+ dim_x: [20] # Number of covariates
+
+# Define reusable learner configurations
+learner_definitions:
+ lasso: &lasso
+ name: "LassoCV"
+
+ logit: &logit
+ name: "Logistic"
+
+ rfr: &rfr
+ name: "RF Regr."
+ params:
+ n_estimators: 200
+ max_features: 20
+ max_depth: 5
+ min_samples_leaf: 2
+
+ rfc: &rfc
+ name: "RF Clas."
+ params:
+ n_estimators: 200
+ max_features: 20
+ max_depth: 5
+ min_samples_leaf: 2
+
+ lgbmr: &lgbmr
+ name: "LGBM Regr."
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+
+ lgbmc: &lgbmc
+ name: "LGBM Clas."
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+
+dml_parameters:
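+  # Learner roles in DoubleMLSSM: ml_g fits the outcome regression, ml_m the
+  # treatment propensity and ml_pi the selection propensity; ml_m and ml_pi
+  # therefore always reference classifiers below.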
+ learners:
+ - ml_g: *lasso
+ ml_m: *logit
+ ml_pi: *logit
+ - ml_g: *rfr
+ ml_m: *rfc
+ ml_pi: *rfc
+ - ml_g: *lasso
+ ml_m: *rfc
+ ml_pi: *rfc
+ - ml_g: *rfr
+ ml_m: *logit
+ ml_pi: *rfc
+ - ml_g: *rfr
+ ml_m: *rfc
+ ml_pi: *logit
+ - ml_g: *lgbmr
+ ml_m: *lgbmc
+ ml_pi: *lgbmc
+ - ml_g: *lasso
+ ml_m: *lgbmc
+ ml_pi: *lgbmc
+ - ml_g: *lgbmr
+ ml_m: *logit
+ ml_pi: *lgbmc
+ - ml_g: *lgbmr
+ ml_m: *lgbmc
+ ml_pi: *logit
+
+confidence_parameters:
+ level: [0.95, 0.90] # Confidence levels
diff --git a/scripts/ssm/ssm_nonig_ate.py b/scripts/ssm/ssm_nonig_ate.py
new file mode 100644
index 0000000..2609915
--- /dev/null
+++ b/scripts/ssm/ssm_nonig_ate.py
@@ -0,0 +1,13 @@
+from montecover.ssm import SSMNonIgnorableATECoverageSimulation
+
+# Create and run simulation with config file
+sim = SSMNonIgnorableATECoverageSimulation(
+ config_file="scripts/ssm/ssm_nonig_ate_config.yml",
+ log_level="INFO",
+ log_file="logs/ssm/ssm_nonig_ate_sim.log",
+)
+sim.run_simulation()
+sim.save_results(output_path="results/ssm/", file_prefix="ssm_nonig_ate")
+
+# Save config file for reproducibility
+sim.save_config("results/ssm/ssm_nonig_ate_config.yml")
diff --git a/scripts/ssm/ssm_nonig_ate_config.yml b/scripts/ssm/ssm_nonig_ate_config.yml
new file mode 100644
index 0000000..ca85751
--- /dev/null
+++ b/scripts/ssm/ssm_nonig_ate_config.yml
@@ -0,0 +1,82 @@
+# Simulation parameters for SSM Nonignorable ATE Coverage
+
+simulation_parameters:
+ repetitions: 1000
+ max_runtime: 19800 # 5.5 hours in seconds
+ random_seed: 42
+ n_jobs: -2
+
+dgp_parameters:
+ theta: [1.0] # Treatment effect
+ n_obs: [500] # Sample size
+ dim_x: [20] # Number of covariates
+
+# Define reusable learner configurations
+learner_definitions:
+ lasso: &lasso
+ name: "LassoCV"
+
+ logit: &logit
+ name: "Logistic"
+
+ rfr: &rfr
+ name: "RF Regr."
+ params:
+ n_estimators: 200
+ max_features: 20
+ max_depth: 5
+ min_samples_leaf: 2
+
+ rfc: &rfc
+ name: "RF Clas."
+ params:
+ n_estimators: 200
+ max_features: 20
+ max_depth: 5
+ min_samples_leaf: 2
+
+ lgbmr: &lgbmr
+ name: "LGBM Regr."
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+
+ lgbmc: &lgbmc
+ name: "LGBM Clas."
+ params:
+ n_estimators: 500
+ learning_rate: 0.01
+
+dml_parameters:
+ learners:
+ - ml_g: *lasso
+ ml_m: *logit
+ ml_pi: *logit
+ - ml_g: *rfr
+ ml_m: *rfc
+ ml_pi: *rfc
+ - ml_g: *lasso
+ ml_m: *rfc
+ ml_pi: *rfc
+ - ml_g: *rfr
+ ml_m: *logit
+ ml_pi: *rfc
+ - ml_g: *rfr
+ ml_m: *rfc
+ ml_pi: *logit
+ - ml_g: *lgbmr
+ ml_m: *lgbmc
+ ml_pi: *lgbmc
+ - ml_g: *lasso
+ ml_m: *lgbmc
+ ml_pi: *lgbmc
+ - ml_g: *lgbmr
+ ml_m: *logit
+ ml_pi: *lgbmc
+ - ml_g: *lgbmr
+ ml_m: *lgbmc
+ ml_pi: *logit
+
+confidence_parameters:
+ level: [0.95, 0.90] # Confidence levels