@@ -31,7 +31,7 @@ The non-uniform results (coverage, ci length and bias) refer to averaged values
31
31
32
32
``` {python}
33
33
#| echo: false
34
- metadata_file = '../../results/irm/pq_coverage_metadata.csv'
34
+ metadata_file = '../../results/irm/pq_metadata.csv'
35
35
metadata_df = pd.read_csv(metadata_file)
36
36
print(metadata_df.T.to_string(header=False))
37
37
```
@@ -42,7 +42,7 @@ print(metadata_df.T.to_string(header=False))
42
42
#| echo: false
43
43
44
44
# set up data
45
- df_qte = pd.read_csv("../../results/irm/pq_coverage_qte.csv", index_col=None)
45
+ df_qte = pd.read_csv("../../results/irm/pq_effect_coverage.csv", index_col=None)
46
46
47
47
assert df_qte["repetition"].nunique() == 1
48
48
n_rep_qte = df_qte["repetition"].unique()[0]
@@ -85,7 +85,7 @@ generate_and_show_styled_table(
85
85
#| echo: false
86
86
87
87
# set up data
88
- df_pq0 = pd.read_csv("../../results/irm/pq_coverage_pq0.csv", index_col=None)
88
+ df_pq0 = pd.read_csv("../../results/irm/pq_Y0_coverage.csv", index_col=None)
89
89
90
90
assert df_pq0["repetition"].nunique() == 1
91
91
n_rep_pq0 = df_pq0["repetition"].unique()[0]
@@ -125,7 +125,7 @@ generate_and_show_styled_table(
125
125
#| echo: false
126
126
127
127
# set up data and rename columns
128
- df_pq1 = pd.read_csv("../../results/irm/pq_coverage_pq1.csv", index_col=None)
128
+ df_pq1 = pd.read_csv("../../results/irm/pq_Y1_coverage.csv", index_col=None)
129
129
130
130
assert df_pq1["repetition"].nunique() == 1
131
131
n_rep_pq1 = df_pq1["repetition"].unique()[0]
@@ -161,15 +161,15 @@ generate_and_show_styled_table(
161
161
162
162
## LQTE
163
163
164
- The results are based on a location-scale model as described in the corresponding [Example](https://docs.doubleml.org/stable/examples/py_double_ml_pq.html#Local-Potential-Quantiles-(LPQs)) with $10,000$ observations.
164
+ The results are based on a location-scale model as described in the corresponding [Example](https://docs.doubleml.org/stable/examples/py_double_ml_pq.html#Local-Potential-Quantiles-(LPQs)) with $5,000$ observations.
165
165
166
166
The non-uniform results (coverage, ci length and bias) refer to averaged values over all quantiles (point-wise confidence intervals).
167
167
168
168
::: {.callout-note title="Metadata" collapse="true"}
169
169
170
170
``` {python}
171
171
#| echo: false
172
- metadata_file = '../../results/irm/lpq_coverage_metadata.csv'
172
+ metadata_file = '../../results/irm/lpq_metadata.csv'
173
173
metadata_df = pd.read_csv(metadata_file)
174
174
print(metadata_df.T.to_string(header=False))
175
175
```
@@ -180,7 +180,7 @@ print(metadata_df.T.to_string(header=False))
180
180
#| echo: false
181
181
182
182
# set up data
183
- df_lqte = pd.read_csv("../../results/irm/lpq_coverage_lqte.csv", index_col=None)
183
+ df_lqte = pd.read_csv("../../results/irm/lpq_effect_coverage.csv", index_col=None)
184
184
185
185
assert df_lqte["repetition"].nunique() == 1
186
186
n_rep_lqte = df_lqte["repetition"].unique()[0]
@@ -222,7 +222,7 @@ generate_and_show_styled_table(
222
222
#| echo: false
223
223
224
224
# set up data
225
- df_lpq0 = pd.read_csv("../../results/irm/lpq_coverage_lpq0.csv", index_col=None)
225
+ df_lpq0 = pd.read_csv("../../results/irm/lpq_Y0_coverage.csv", index_col=None)
226
226
227
227
assert df_lpq0["repetition"].nunique() == 1
228
228
n_rep_lpq0 = df_lpq0["repetition"].unique()[0]
@@ -262,7 +262,7 @@ generate_and_show_styled_table(
262
262
#| echo: false
263
263
264
264
# set up data
265
- df_lpq1 = pd.read_csv("../../results/irm/lpq_coverage_lpq1.csv", index_col=None)
265
+ df_lpq1 = pd.read_csv("../../results/irm/lpq_Y1_coverage.csv", index_col=None)
266
266
267
267
assert df_lpq1["repetition"].nunique() == 1
268
268
n_rep_lpq1 = df_lpq1["repetition"].unique()[0]
@@ -306,7 +306,7 @@ The non-uniform results (coverage, ci length and bias) refer to averaged values
306
306
307
307
``` {python}
308
308
#| echo: false
309
- metadata_file = '../../results/irm/cvar_coverage_metadata.csv'
309
+ metadata_file = '../../results/irm/cvar_metadata.csv'
310
310
metadata_df = pd.read_csv(metadata_file)
311
311
print(metadata_df.T.to_string(header=False))
312
312
```
@@ -317,7 +317,7 @@ print(metadata_df.T.to_string(header=False))
317
317
#| echo: false
318
318
319
319
# set up data
320
- df_cvar_qte = pd.read_csv("../../results/irm/cvar_coverage_qte.csv", index_col=None)
320
+ df_cvar_qte = pd.read_csv("../../results/irm/cvar_effect_coverage.csv", index_col=None)
321
321
322
322
assert df_cvar_qte["repetition"].nunique() == 1
323
323
n_rep_cvar_qte = df_cvar_qte["repetition"].unique()[0]
@@ -359,7 +359,7 @@ generate_and_show_styled_table(
359
359
#| echo: false
360
360
361
361
# set up data
362
- df_cvar_pq0 = pd.read_csv("../../results/irm/cvar_coverage_pq0.csv", index_col=None)
362
+ df_cvar_pq0 = pd.read_csv("../../results/irm/cvar_Y0_coverage.csv", index_col=None)
363
363
364
364
assert df_cvar_pq0["repetition"].nunique() == 1
365
365
n_rep_cvar_pq0 = df_cvar_pq0["repetition"].unique()[0]
@@ -399,7 +399,7 @@ generate_and_show_styled_table(
399
399
#| echo: false
400
400
401
401
# set up data
402
- df_cvar_pq1 = pd.read_csv("../../results/irm/cvar_coverage_pq1.csv", index_col=None)
402
+ df_cvar_pq1 = pd.read_csv("../../results/irm/cvar_Y1_coverage.csv", index_col=None)
403
403
404
404
assert df_cvar_pq1["repetition"].nunique() == 1
405
405
n_rep_cvar_pq1 = df_cvar_pq1["repetition"].unique()[0]
0 commit comments