Skip to content

Commit

Permalink
fix linting and formula
Browse files Browse the repository at this point in the history
  • Loading branch information
kgao committed May 1, 2023
1 parent ce4fec5 commit efa09e4
Show file tree
Hide file tree
Showing 2 changed files with 11 additions and 14 deletions.
16 changes: 7 additions & 9 deletions econml/score/drscorer.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,14 +10,13 @@


class DRScorer:
""" Scorer based on the DRLearner loss. Fits regression model g (using T-Learner) and propensity model p at fit time
and calculates the regression and propensity of the evaluation data::
""" Scorer based on the DRLearner loss. Fits regression model g (using T-Learner) and propensity model p at fit
time and calculates the regression and propensity of the evaluation data::
g (model_regression) = E[Y | X, W, T]
p (model_propensity) = Pr[T | X, W]
Ydr(g,p) = g + (Y - g ) / p * T
Ydr(g,p) = g(X,W,T) + (Y - g(X,W,T)) / p_T(X,W)
Then for any given cate model calculates the loss::
Expand Down Expand Up @@ -206,17 +205,16 @@ def score(self, cate_model):
score : double
An analogue of the DR-square loss for the causal setting.
"""
g, p = self.drlearner_._cached_values.nuisances
Y = self.drlearner_._cached_values.Y
T = self.drlearner_._cached_values.T
Ydr = g + (Y - g) / p * T
Y = self.drlearner_._cached_values.Y
T = self.drlearner_._cached_values.T
Y_pred, _ = self.drlearner_._cached_values.nuisances
Ydr = Y_pred[..., 1:] - Y_pred[..., [0]]
X = self.drlearner_._cached_values.W[:, :self.dx_]
sample_weight = self.drlearner_._cached_values.sample_weight
if Ydr.ndim == 1:
Ydr = Ydr.reshape((-1, 1))

effects = cate_model.const_marginal_effect(X).reshape((-1, Ydr.shape[1]))

if sample_weight is not None:
return 1 - np.mean(np.average((Ydr - effects)**2, weights=sample_weight, axis=0)) / self.base_score_
else:
Expand Down
9 changes: 4 additions & 5 deletions econml/tests/test_drscorer.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,9 +29,8 @@ def _get_data(self):
X = np.random.normal(size=(1000, 3))
T = np.random.binomial(2, scipy.special.expit(X[:, 0]))
sigma = 0.001
y = (1 + .5*X[:, 0]) * T + X[:, 0] + np.random.normal(0, sigma, size=(1000,))
y = (1 + .5 * X[:, 0]) * T + X[:, 0] + np.random.normal(0, sigma, size=(1000,))
return y, T, X, X[:, 0]


def test_comparison(self):
def reg():
Expand All @@ -53,7 +52,7 @@ def clf():
('dalearner', DomainAdaptationLearner(models=reg(), final_models=reg(), propensity_model=clf())),
('slearner', SLearner(overall_model=reg())),
('tlearner', TLearner(models=reg())),
('drlearner', DRLearner(model_propensity='auto',model_regression='auto',
('drlearner', DRLearner(model_propensity='auto', model_regression='auto',
model_final=reg(), cv=3)),
('rlearner', NonParamDML(model_y=reg(), model_t=clf(), model_final=reg(),
discrete_treatment=True, cv=3)),
Expand All @@ -72,8 +71,8 @@ def clf():
multitask_model_final=False,
featurizer=None,
min_propensity=1e-6,
cv=3,
mc_iters=2,
cv=3,
mc_iters=2,
mc_agg='median')
scorer.fit(Y_val, T_val, X=X_val)
rscore = [scorer.score(mdl) for _, mdl in models]
Expand Down

0 comments on commit efa09e4

Please sign in to comment.