From 0b326407a57724791c9304d9c32134257d99d1a5 Mon Sep 17 00:00:00 2001 From: "Erik A. Daxberger" Date: Tue, 30 Nov 2021 09:24:03 +0000 Subject: [PATCH 01/49] Add support for subnetwork Laplace approximation --- laplace/baselaplace.py | 2 +- laplace/curvature/asdl.py | 20 ++++-- laplace/curvature/backpack.py | 17 +++-- laplace/curvature/curvature.py | 23 +++++-- laplace/laplace.py | 5 +- laplace/subnetlaplace.py | 120 +++++++++++++++++++++++++++++++++ 6 files changed, 169 insertions(+), 18 deletions(-) create mode 100644 laplace/subnetlaplace.py diff --git a/laplace/baselaplace.py b/laplace/baselaplace.py index 527a130c..4b9ce704 100644 --- a/laplace/baselaplace.py +++ b/laplace/baselaplace.py @@ -560,7 +560,7 @@ def predictive_samples(self, x, pred_type='glm', n_samples=100): @torch.enable_grad() def _glm_predictive_distribution(self, X): - Js, f_mu = self.backend.jacobians(self.model, X) + Js, f_mu = self.backend.jacobians(self.model, X, self.backend.subnetwork_indices) f_var = self.functional_variance(Js) return f_mu.detach(), f_var.detach() diff --git a/laplace/curvature/asdl.py b/laplace/curvature/asdl.py index c5307484..afe3e673 100644 --- a/laplace/curvature/asdl.py +++ b/laplace/curvature/asdl.py @@ -15,13 +15,13 @@ class AsdlInterface(CurvatureInterface): """Interface for asdfghjkl backend. """ - def __init__(self, model, likelihood, last_layer=False): + def __init__(self, model, likelihood, last_layer=False, subnetwork_indices=None): if likelihood != 'classification': raise ValueError('This backend only supports classification currently.') - super().__init__(model, likelihood, last_layer) + super().__init__(model, likelihood, last_layer, subnetwork_indices) @staticmethod - def jacobians(model, x): + def jacobians(model, x, subnetwork_indices=None): """Compute Jacobians \\(\\nabla_\\theta f(x;\\theta)\\) at current parameter \\(\\theta\\) using asdfghjkl's gradient per output dimension. @@ -30,6 +30,9 @@ def jacobians(model, x): model : torch.nn.Module x : torch.Tensor input data `(batch, input_shape)` on compatible device with model. + subnetwork_indices : torch.Tensor, default=None + indices of the vectorized model parameters that define the subnetwork + to apply the Laplace approximation over Returns ------- @@ -44,7 +47,10 @@ def loss_fn(outputs, targets): return outputs[:, i].sum() f = batch_gradient(model, loss_fn, x, None).detach() - Js.append(_get_batch_grad(model)) + Jk = _get_batch_grad(model) + if subnetwork_indices is not None: + Jk = Jk[:, subnetwork_indices] + Js.append(Jk) Js = torch.stack(Js, dim=1) return Js, f @@ -66,6 +72,8 @@ def gradients(self, x, y): """ f = batch_gradient(self.model, self.lossfunc, x, y).detach() Gs = _get_batch_grad(self._model) + if self.subnetwork_indices is not None: + Gs = Gs[:, self.subnetwork_indices] loss = self.lossfunc(f, y) return Gs, loss @@ -134,8 +142,8 @@ def kron(self, X, y, N, **wkwargs) -> [torch.Tensor, Kron]: class AsdlGGN(AsdlInterface, GGNInterface): """Implementation of the `GGNInterface` using asdfghjkl. 
""" - def __init__(self, model, likelihood, last_layer=False, stochastic=False): - super().__init__(model, likelihood, last_layer) + def __init__(self, model, likelihood, last_layer=False, subnetwork_indices=None, stochastic=False): + super().__init__(model, likelihood, last_layer, subnetwork_indices) self.stochastic = stochastic @property diff --git a/laplace/curvature/backpack.py b/laplace/curvature/backpack.py index 885ee2b9..6e655944 100644 --- a/laplace/curvature/backpack.py +++ b/laplace/curvature/backpack.py @@ -11,13 +11,13 @@ class BackPackInterface(CurvatureInterface): """Interface for Backpack backend. """ - def __init__(self, model, likelihood, last_layer=False): - super().__init__(model, likelihood, last_layer) + def __init__(self, model, likelihood, last_layer=False, subnetwork_indices=None): + super().__init__(model, likelihood, last_layer, subnetwork_indices) extend(self._model) extend(self.lossfunc) @staticmethod - def jacobians(model, x): + def jacobians(model, x, subnetwork_indices=None): """Compute Jacobians \\(\\nabla_{\\theta} f(x;\\theta)\\) at current parameter \\(\\theta\\) using backpack's BatchGrad per output dimension. @@ -26,6 +26,9 @@ def jacobians(model, x): model : torch.nn.Module x : torch.Tensor input data `(batch, input_shape)` on compatible device with model. + subnetwork_indices : torch.Tensor, default=None + indices of the vectorized model parameters that define the subnetwork + to apply the Laplace approximation over Returns ------- @@ -49,6 +52,8 @@ def jacobians(model, x): to_cat.append(param.grad_batch.detach().reshape(x.shape[0], -1)) delattr(param, 'grad_batch') Jk = torch.cat(to_cat, dim=1) + if subnetwork_indices is not None: + Jk = Jk[:, subnetwork_indices] to_stack.append(Jk) if i == 0: f = out.detach() @@ -83,14 +88,16 @@ def gradients(self, x, y): loss.backward() Gs = torch.cat([p.grad_batch.data.flatten(start_dim=1) for p in self._model.parameters()], dim=1) + if self.subnetwork_indices is not None: + Gs = Gs[:, self.subnetwork_indices] return Gs, loss class BackPackGGN(BackPackInterface, GGNInterface): """Implementation of the `GGNInterface` using Backpack. """ - def __init__(self, model, likelihood, last_layer=False, stochastic=False): - super().__init__(model, likelihood, last_layer) + def __init__(self, model, likelihood, last_layer=False, subnetwork_indices=None, stochastic=False): + super().__init__(model, likelihood, last_layer, subnetwork_indices) self.stochastic = stochastic def _get_diag_ggn(self): diff --git a/laplace/curvature/curvature.py b/laplace/curvature/curvature.py index 363e51d1..735fd74d 100644 --- a/laplace/curvature/curvature.py +++ b/laplace/curvature/curvature.py @@ -16,6 +16,9 @@ class CurvatureInterface: likelihood : {'classification', 'regression'} last_layer : bool, default=False only consider curvature of last layer + subnetwork_indices : torch.Tensor, default=None + indices of the vectorized model parameters that define the subnetwork + to apply the Laplace approximation over Attributes ---------- @@ -24,11 +27,12 @@ class CurvatureInterface: conversion factor between torch losses and base likelihoods For example, \\(\\frac{1}{2}\\) to get to \\(\\mathcal{N}(f, 1)\\) from MSELoss. 
""" - def __init__(self, model, likelihood, last_layer=False): + def __init__(self, model, likelihood, last_layer=False, subnetwork_indices=None): assert likelihood in ['regression', 'classification'] self.likelihood = likelihood self.model = model self.last_layer = last_layer + self.subnetwork_indices = subnetwork_indices if likelihood == 'regression': self.lossfunc = MSELoss(reduction='sum') self.factor = 0.5 @@ -41,7 +45,7 @@ def _model(self): return self.model.last_layer if self.last_layer else self.model @staticmethod - def jacobians(model, x): + def jacobians(model, x, subnetwork_indices=None): """Compute Jacobians \\(\\nabla_\\theta f(x;\\theta)\\) at current parameter \\(\\theta\\). Parameters @@ -49,6 +53,9 @@ def jacobians(model, x): model : torch.nn.Module x : torch.Tensor input data `(batch, input_shape)` on compatible device with model. + subnetwork_indices : torch.Tensor, default=None + indices of the vectorized model parameters that define the subnetwork + to apply the Laplace approximation over Returns ------- @@ -180,12 +187,15 @@ class GGNInterface(CurvatureInterface): likelihood : {'classification', 'regression'} last_layer : bool, default=False only consider curvature of last layer + subnetwork_indices : torch.Tensor, default=None + indices of the vectorized model parameters that define the subnetwork + to apply the Laplace approximation over stochastic : bool, default=False Fisher if stochastic else GGN """ - def __init__(self, model, likelihood, last_layer=False, stochastic=False): + def __init__(self, model, likelihood, last_layer=False, subnetwork_indices=None, stochastic=False): self.stochastic = stochastic - super().__init__(model, likelihood, last_layer) + super().__init__(model, likelihood, last_layer, subnetwork_indices) def _get_full_ggn(self, Js, f, y): """Compute full GGN from Jacobians. 
@@ -239,7 +249,7 @@ def full(self, x, y, **kwargs): if self.last_layer: Js, f = self.last_layer_jacobians(self.model, x) else: - Js, f = self.jacobians(self.model, x) + Js, f = self.jacobians(self.model, x, self.subnetwork_indices) loss, H_ggn = self._get_full_ggn(Js, f, y) return loss, H_ggn @@ -256,6 +266,9 @@ class EFInterface(CurvatureInterface): likelihood : {'classification', 'regression'} last_layer : bool, default=False only consider curvature of last layer + subnetwork_indices : torch.Tensor, default=None + indices of the vectorized model parameters that define the subnetwork + to apply the Laplace approximation over Attributes ---------- diff --git a/laplace/laplace.py b/laplace/laplace.py index 98f11cad..24241f17 100644 --- a/laplace/laplace.py +++ b/laplace/laplace.py @@ -10,7 +10,7 @@ def Laplace(model, likelihood, subset_of_weights='last_layer', hessian_structure ---------- model : torch.nn.Module likelihood : {'classification', 'regression'} - subset_of_weights : {'last_layer', 'all'}, default='last_layer' + subset_of_weights : {'last_layer', 'subnetwork', 'all'}, default='last_layer' subset of weights to consider for inference hessian_structure : {'diag', 'kron', 'full'}, default='kron' structure of the Hessian approximation @@ -20,6 +20,9 @@ def Laplace(model, likelihood, subset_of_weights='last_layer', hessian_structure laplace : ParametricLaplace chosen subclass of ParametricLaplace instantiated with additional arguments """ + if subset_of_weights == 'subnetwork' and hessian_structure != 'full': + raise ValueError('Subnetwork Laplace requires using a full Hessian approximation!') + laplace_map = {subclass._key: subclass for subclass in _all_subclasses(ParametricLaplace) if hasattr(subclass, '_key')} laplace_class = laplace_map[(subset_of_weights, hessian_structure)] diff --git a/laplace/subnetlaplace.py b/laplace/subnetlaplace.py new file mode 100644 index 00000000..30bf15d2 --- /dev/null +++ b/laplace/subnetlaplace.py @@ -0,0 +1,120 @@ +import torch + +from laplace.baselaplace import ParametricLaplace, FullLaplace + +from laplace.curvature import BackPackGGN + + +__all__ = ['FullSubnetLaplace'] + + +class SubnetLaplace(ParametricLaplace): + """Baseclass for all subnetwork Laplace approximations in this library. + Subclasses specify the structure of the Hessian approximation. + See `BaseLaplace` for the full interface. + + A Laplace approximation is represented by a MAP which is given by the + `model` parameter and a posterior precision or covariance specifying + a Gaussian distribution \\(\\mathcal{N}(\\theta_{MAP}, P^{-1})\\). + Here, only the parameters of a subnetwork of the neural network + are treated probabilistically. + The goal of this class is to compute the posterior precision \\(P\\) + which sums as + \\[ + P = \\sum_{n=1}^N \\nabla^2_\\theta \\log p(\\mathcal{D}_n \\mid \\theta) + \\vert_{\\theta_{MAP}} + \\nabla^2_\\theta \\log p(\\theta) \\vert_{\\theta_{MAP}}. + \\] + There is one subclass, which implements the only supported option of a full + approximation to the log likelihood Hessian. The prior is assumed to be Gaussian and + therefore we have a simple form for + \\(\\nabla^2_\\theta \\log p(\\theta) \\vert_{\\theta_{MAP}} = P_0 \\). + In particular, we assume a scalar or diagonal prior precision so that in + all cases \\(P_0 = \\textrm{diag}(p_0)\\) and the structure of \\(p_0\\) can be varied. 
+ + Parameters + ---------- + model : torch.nn.Module or `laplace.feature_extractor.FeatureExtractor` + likelihood : {'classification', 'regression'} + determines the log likelihood Hessian approximation + subnetwork_mask : torch.Tensor, default=None + mask defining the subnetwork to apply the Laplace approximation over + sigma_noise : torch.Tensor or float, default=1 + observation noise for the regression setting; must be 1 for classification + prior_precision : torch.Tensor or float, default=1 + prior precision of a Gaussian prior (= weight decay); + can be scalar, per-layer, or diagonal in the most general case + prior_mean : torch.Tensor or float, default=0 + prior mean of a Gaussian prior, useful for continual learning + temperature : float, default=1 + temperature of the likelihood; lower temperature leads to more + concentrated posterior and vice versa. + backend : subclasses of `laplace.curvature.CurvatureInterface` + backend for access to curvature/Hessian approximations + backend_kwargs : dict, default=None + arguments passed to the backend on initialization, for example to + set the number of MC samples for stochastic approximations. + """ + def __init__(self, model, likelihood, subnetwork_mask=None, sigma_noise=1., prior_precision=1., + prior_mean=0., temperature=1., backend=BackPackGGN, backend_kwargs=None): + super().__init__(model, likelihood, sigma_noise=sigma_noise, prior_precision=prior_precision, + prior_mean=prior_mean, temperature=temperature, backend=backend, + backend_kwargs=backend_kwargs) + self.subnetwork_mask = subnetwork_mask + + @property + def subnetwork_mask(self): + return self._subnetwork_mask + + @subnetwork_mask.setter + def subnetwork_mask(self, subnetwork_mask): + """Check validity of subnetwork mask and convert it to a vector of indices of the vectorized + model parameters that define the subnetwork to apply the Laplace approximation over. + """ + if isinstance(subnetwork_mask, torch.Tensor) and len(subnetwork_mask.shape) == 1: + if len(subnetwork_mask) == self.n_params and\ + len(subnetwork_mask[subnetwork_mask == 0]) +\ + len(subnetwork_mask[subnetwork_mask == 1]) == self.n_params: + self._subnetwork_mask = subnetwork_mask.nonzero(as_tuple=True)[0] + + elif len(subnetwork_mask) <= self.n_params and\ + len(subnetwork_mask[subnetwork_mask >= self.n_params]) == 0: + self._subnetwork_mask = subnetwork_mask + + else: + raise ValueError('Subnetwork mask needs to identify the subnetwork parameters\ + from the vectorized model parameters as:\ + 1) a vector of indices of the subnetwork parameters,\ + 2) a binary vector of size (parameters) where 1s locate the subnetwork parameters') + + elif subnetwork_mask is None: + raise ValueError('You need to specify a subnetwork mask!') + + else: + raise ValueError('Subnetwork mask needs to be 1-dimensional torch.Tensor!') + + # Q: do we allow changing the subnetwork after instantiation, or should it stay fixed? + #self._backend_kwargs['subnetwork_mask'] = self._subnetwork_mask + self.backend.subnetwork_indices = self._subnetwork_mask + + # Q: documentation: should I mention subnetworks everywhere and write down the number + # of parameters? + + # Q jacobian() is static and therefore cannot access self.subnetwork_indices (need to pass it) + # what about making it non-static? it's also ugly in l. 563 of baselaplace.py! + + + +class FullSubnetLaplace(SubnetLaplace, FullLaplace): + """Subnetwork Laplace approximation with full, i.e., dense, log likelihood Hessian approximation + and hence posterior precision. 
Based on the chosen `backend` parameter, the full + approximation can be, for example, a generalized Gauss-Newton matrix. + Mathematically, we have \\(P \\in \\mathbb{R}^{P \\times P}\\). + See `FullLaplace`, `LLLaplace`, and `BaseLaplace` for the full interface. + """ + # key to map to correct subclass of BaseLaplace, (subset of weights, Hessian structure) + _key = ('subnetwork', 'full') + + def __init__(self, model, likelihood, subnetwork_mask=None, sigma_noise=1., prior_precision=1., + prior_mean=0., temperature=1., backend=BackPackGGN, backend_kwargs=None): + super().__init__(model, likelihood, subnetwork_mask, sigma_noise, prior_precision, + prior_mean, temperature, backend, backend_kwargs) From 88e806ce49fe56579eafef694648160503c9e5c8 Mon Sep 17 00:00:00 2001 From: "Erik A. Daxberger" Date: Tue, 30 Nov 2021 10:26:26 +0000 Subject: [PATCH 02/49] Fix issues with subnetwork Laplace integration --- laplace/__init__.py | 5 ++++- laplace/subnetlaplace.py | 45 ++++++++++++++++++++++++++++++++-------- 2 files changed, 40 insertions(+), 10 deletions(-) diff --git a/laplace/__init__.py b/laplace/__init__.py index 2f866125..32f67929 100644 --- a/laplace/__init__.py +++ b/laplace/__init__.py @@ -8,10 +8,13 @@ from laplace.baselaplace import BaseLaplace, ParametricLaplace, FullLaplace, KronLaplace, DiagLaplace from laplace.lllaplace import LLLaplace, FullLLLaplace, KronLLLaplace, DiagLLLaplace +from laplace.subnetlaplace import SubnetLaplace, FullSubnetLaplace from laplace.laplace import Laplace __all__ = ['Laplace', # direct access to all Laplace classes via unified interface 'BaseLaplace', 'ParametricLaplace', # base-class and its (first-level) subclasses 'FullLaplace', 'KronLaplace', 'DiagLaplace', # all-weights 'LLLaplace', # base-class last-layer - 'FullLLLaplace', 'KronLLLaplace', 'DiagLLLaplace'] # last-layer + 'FullLLLaplace', 'KronLLLaplace', 'DiagLLLaplace', # last-layer + 'SubnetLaplace', # base-class subnetwork + 'FullSubnetLaplace'] # subnetwork diff --git a/laplace/subnetlaplace.py b/laplace/subnetlaplace.py index 30bf15d2..213b1ef0 100644 --- a/laplace/subnetlaplace.py +++ b/laplace/subnetlaplace.py @@ -60,6 +60,7 @@ def __init__(self, model, likelihood, subnetwork_mask=None, sigma_noise=1., prio prior_mean=prior_mean, temperature=temperature, backend=backend, backend_kwargs=backend_kwargs) self.subnetwork_mask = subnetwork_mask + self.n_params_subnet = len(self.subnetwork_mask) @property def subnetwork_mask(self): @@ -70,8 +71,12 @@ def subnetwork_mask(self, subnetwork_mask): """Check validity of subnetwork mask and convert it to a vector of indices of the vectorized model parameters that define the subnetwork to apply the Laplace approximation over. 
""" - if isinstance(subnetwork_mask, torch.Tensor) and len(subnetwork_mask.shape) == 1: - if len(subnetwork_mask) == self.n_params and\ + if isinstance(subnetwork_mask, torch.Tensor): + if subnetwork_mask.type() not in ['torch.ByteTensor', 'torch.IntTensor', 'torch.LongTensor'] or\ + len(subnetwork_mask.shape) != 1: + raise ValueError('Subnetwork mask needs to be 1-dimensional torch.{Byte,Int,Long}Tensor!') + + elif len(subnetwork_mask) == self.n_params and\ len(subnetwork_mask[subnetwork_mask == 0]) +\ len(subnetwork_mask[subnetwork_mask == 1]) == self.n_params: self._subnetwork_mask = subnetwork_mask.nonzero(as_tuple=True)[0] @@ -81,16 +86,16 @@ def subnetwork_mask(self, subnetwork_mask): self._subnetwork_mask = subnetwork_mask else: - raise ValueError('Subnetwork mask needs to identify the subnetwork parameters\ - from the vectorized model parameters as:\ - 1) a vector of indices of the subnetwork parameters,\ - 2) a binary vector of size (parameters) where 1s locate the subnetwork parameters') + raise ValueError('Subnetwork mask needs to identify the subnetwork parameters '\ + 'from the vectorized model parameters as:\n'\ + '1) a vector of indices of the subnetwork parameters, or\n'\ + '2) a binary vector of size (parameters) where 1s locate the subnetwork parameters.') elif subnetwork_mask is None: - raise ValueError('You need to specify a subnetwork mask!') + raise ValueError('Subnetwork Laplace requires passing a subnetwork mask!') else: - raise ValueError('Subnetwork mask needs to be 1-dimensional torch.Tensor!') + raise ValueError('Subnetwork mask needs to be torch.Tensor!') # Q: do we allow changing the subnetwork after instantiation, or should it stay fixed? #self._backend_kwargs['subnetwork_mask'] = self._subnetwork_mask @@ -101,7 +106,26 @@ def subnetwork_mask(self, subnetwork_mask): # Q jacobian() is static and therefore cannot access self.subnetwork_indices (need to pass it) # what about making it non-static? it's also ugly in l. 563 of baselaplace.py! - + + # still need to implement nn mc predictive (need to sample subnet separately and then put samples together) + + @property + def prior_precision_diag(self): + """Obtain the diagonal prior precision \\(p_0\\) constructed from either + a scalar or diagonal prior precision. + + Returns + ------- + prior_precision_diag : torch.Tensor + """ + if len(self.prior_precision) == 1: # scalar + return self.prior_precision * torch.ones(self.n_params_subnet, device=self._device) + + elif len(self.prior_precision) == self.n_params_subnet: # diagonal + return self.prior_precision + + else: + raise ValueError('Mismatch of prior and model. Diagonal or scalar prior.') class FullSubnetLaplace(SubnetLaplace, FullLaplace): @@ -118,3 +142,6 @@ def __init__(self, model, likelihood, subnetwork_mask=None, sigma_noise=1., prio prior_mean=0., temperature=1., backend=BackPackGGN, backend_kwargs=None): super().__init__(model, likelihood, subnetwork_mask, sigma_noise, prior_precision, prior_mean, temperature, backend, backend_kwargs) + + def _init_H(self): + self.H = torch.zeros(self.n_params_subnet, self.n_params_subnet, device=self._device) From 768fa63e56a57f7343801f4474ff3f13effbcb81 Mon Sep 17 00:00:00 2001 From: "Erik A. 
Daxberger" Date: Tue, 30 Nov 2021 10:28:41 +0000 Subject: [PATCH 03/49] Remove notes to myself --- laplace/subnetlaplace.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/laplace/subnetlaplace.py b/laplace/subnetlaplace.py index 213b1ef0..298ddffa 100644 --- a/laplace/subnetlaplace.py +++ b/laplace/subnetlaplace.py @@ -97,18 +97,8 @@ def subnetwork_mask(self, subnetwork_mask): else: raise ValueError('Subnetwork mask needs to be torch.Tensor!') - # Q: do we allow changing the subnetwork after instantiation, or should it stay fixed? - #self._backend_kwargs['subnetwork_mask'] = self._subnetwork_mask self.backend.subnetwork_indices = self._subnetwork_mask - # Q: documentation: should I mention subnetworks everywhere and write down the number - # of parameters? - - # Q jacobian() is static and therefore cannot access self.subnetwork_indices (need to pass it) - # what about making it non-static? it's also ugly in l. 563 of baselaplace.py! - - # still need to implement nn mc predictive (need to sample subnet separately and then put samples together) - @property def prior_precision_diag(self): """Obtain the diagonal prior precision \\(p_0\\) constructed from either From f8ab8ac4532c24fbbf097ba6a85672b4a690d880 Mon Sep 17 00:00:00 2001 From: "Erik A. Daxberger" Date: Fri, 10 Dec 2021 07:44:17 +0000 Subject: [PATCH 04/49] Remove SubnetLaplace base class; only option remains FullSubnetLaplace --- laplace/__init__.py | 3 +-- laplace/subnetlaplace.py | 50 +++++++++++++++++----------------------- 2 files changed, 22 insertions(+), 31 deletions(-) diff --git a/laplace/__init__.py b/laplace/__init__.py index 32f67929..e34989b0 100644 --- a/laplace/__init__.py +++ b/laplace/__init__.py @@ -8,7 +8,7 @@ from laplace.baselaplace import BaseLaplace, ParametricLaplace, FullLaplace, KronLaplace, DiagLaplace from laplace.lllaplace import LLLaplace, FullLLLaplace, KronLLLaplace, DiagLLLaplace -from laplace.subnetlaplace import SubnetLaplace, FullSubnetLaplace +from laplace.subnetlaplace import FullSubnetLaplace from laplace.laplace import Laplace __all__ = ['Laplace', # direct access to all Laplace classes via unified interface @@ -16,5 +16,4 @@ 'FullLaplace', 'KronLaplace', 'DiagLaplace', # all-weights 'LLLaplace', # base-class last-layer 'FullLLLaplace', 'KronLLLaplace', 'DiagLLLaplace', # last-layer - 'SubnetLaplace', # base-class subnetwork 'FullSubnetLaplace'] # subnetwork diff --git a/laplace/subnetlaplace.py b/laplace/subnetlaplace.py index 298ddffa..9749c1f6 100644 --- a/laplace/subnetlaplace.py +++ b/laplace/subnetlaplace.py @@ -1,6 +1,6 @@ import torch -from laplace.baselaplace import ParametricLaplace, FullLaplace +from laplace.baselaplace import FullLaplace from laplace.curvature import BackPackGGN @@ -8,29 +8,34 @@ __all__ = ['FullSubnetLaplace'] -class SubnetLaplace(ParametricLaplace): - """Baseclass for all subnetwork Laplace approximations in this library. - Subclasses specify the structure of the Hessian approximation. - See `BaseLaplace` for the full interface. +class FullSubnetLaplace(FullLaplace): + """Class for subnetwork Laplace, which computes the Laplace approximation over + just a subset of the model parameters (i.e. a subnetwork within the neural network). + Subnetwork Laplace only supports a full Hessian approximation; other Hessian + approximations could be used in theory, but would not make as much sense conceptually. 
A Laplace approximation is represented by a MAP which is given by the `model` parameter and a posterior precision or covariance specifying a Gaussian distribution \\(\\mathcal{N}(\\theta_{MAP}, P^{-1})\\). - Here, only the parameters of a subnetwork of the neural network - are treated probabilistically. + Here, only a subset of the model parameters (i.e. a subnetwork of the + neural network) are treated probabilistically. The goal of this class is to compute the posterior precision \\(P\\) which sums as \\[ P = \\sum_{n=1}^N \\nabla^2_\\theta \\log p(\\mathcal{D}_n \\mid \\theta) \\vert_{\\theta_{MAP}} + \\nabla^2_\\theta \\log p(\\theta) \\vert_{\\theta_{MAP}}. \\] - There is one subclass, which implements the only supported option of a full - approximation to the log likelihood Hessian. The prior is assumed to be Gaussian and - therefore we have a simple form for + The prior is assumed to be Gaussian and therefore we have a simple form for \\(\\nabla^2_\\theta \\log p(\\theta) \\vert_{\\theta_{MAP}} = P_0 \\). In particular, we assume a scalar or diagonal prior precision so that in all cases \\(P_0 = \\textrm{diag}(p_0)\\) and the structure of \\(p_0\\) can be varied. + The subnetwork Laplace approximation only supports a full, i.e., dense, log likelihood + Hessian approximation and hence posterior precision. Based on the chosen `backend` + parameter, the full approximation can be, for example, a generalized Gauss-Newton + matrix. Mathematically, we have \\(P \\in \\mathbb{R}^{P \\times P}\\). + See `FullLaplace` and `BaseLaplace` for the full interface. + Parameters ---------- model : torch.nn.Module or `laplace.feature_extractor.FeatureExtractor` @@ -54,6 +59,9 @@ class SubnetLaplace(ParametricLaplace): arguments passed to the backend on initialization, for example to set the number of MC samples for stochastic approximations. """ + # key to map to correct subclass of BaseLaplace, (subset of weights, Hessian structure) + _key = ('subnetwork', 'full') + def __init__(self, model, likelihood, subnetwork_mask=None, sigma_noise=1., prior_precision=1., prior_mean=0., temperature=1., backend=BackPackGGN, backend_kwargs=None): super().__init__(model, likelihood, sigma_noise=sigma_noise, prior_precision=prior_precision, @@ -62,6 +70,9 @@ def __init__(self, model, likelihood, subnetwork_mask=None, sigma_noise=1., prio self.subnetwork_mask = subnetwork_mask self.n_params_subnet = len(self.subnetwork_mask) + def _init_H(self): + self.H = torch.zeros(self.n_params_subnet, self.n_params_subnet, device=self._device) + @property def subnetwork_mask(self): return self._subnetwork_mask @@ -116,22 +127,3 @@ def prior_precision_diag(self): else: raise ValueError('Mismatch of prior and model. Diagonal or scalar prior.') - - -class FullSubnetLaplace(SubnetLaplace, FullLaplace): - """Subnetwork Laplace approximation with full, i.e., dense, log likelihood Hessian approximation - and hence posterior precision. Based on the chosen `backend` parameter, the full - approximation can be, for example, a generalized Gauss-Newton matrix. - Mathematically, we have \\(P \\in \\mathbb{R}^{P \\times P}\\). - See `FullLaplace`, `LLLaplace`, and `BaseLaplace` for the full interface. 
- """ - # key to map to correct subclass of BaseLaplace, (subset of weights, Hessian structure) - _key = ('subnetwork', 'full') - - def __init__(self, model, likelihood, subnetwork_mask=None, sigma_noise=1., prior_precision=1., - prior_mean=0., temperature=1., backend=BackPackGGN, backend_kwargs=None): - super().__init__(model, likelihood, subnetwork_mask, sigma_noise, prior_precision, - prior_mean, temperature, backend, backend_kwargs) - - def _init_H(self): - self.H = torch.zeros(self.n_params_subnet, self.n_params_subnet, device=self._device) From 57f46d2b4f9909eecd1117635ffb6bf3fb7f8da5 Mon Sep 17 00:00:00 2001 From: "Erik A. Daxberger" Date: Fri, 10 Dec 2021 08:16:39 +0000 Subject: [PATCH 05/49] Make jacobians and last_layer_jacobians non-static and adapted code accordingly (incl. tests) --- laplace/baselaplace.py | 2 +- laplace/curvature/asdl.py | 10 +++------- laplace/curvature/backpack.py | 10 +++------- laplace/curvature/curvature.py | 11 +++-------- tests/test_jacobians.py | 25 +++++++++++++++---------- 5 files changed, 25 insertions(+), 33 deletions(-) diff --git a/laplace/baselaplace.py b/laplace/baselaplace.py index 4b9ce704..527a130c 100644 --- a/laplace/baselaplace.py +++ b/laplace/baselaplace.py @@ -560,7 +560,7 @@ def predictive_samples(self, x, pred_type='glm', n_samples=100): @torch.enable_grad() def _glm_predictive_distribution(self, X): - Js, f_mu = self.backend.jacobians(self.model, X, self.backend.subnetwork_indices) + Js, f_mu = self.backend.jacobians(self.model, X) f_var = self.functional_variance(Js) return f_mu.detach(), f_var.detach() diff --git a/laplace/curvature/asdl.py b/laplace/curvature/asdl.py index afe3e673..2d9dc0bb 100644 --- a/laplace/curvature/asdl.py +++ b/laplace/curvature/asdl.py @@ -20,8 +20,7 @@ def __init__(self, model, likelihood, last_layer=False, subnetwork_indices=None) raise ValueError('This backend only supports classification currently.') super().__init__(model, likelihood, last_layer, subnetwork_indices) - @staticmethod - def jacobians(model, x, subnetwork_indices=None): + def jacobians(self, model, x): """Compute Jacobians \\(\\nabla_\\theta f(x;\\theta)\\) at current parameter \\(\\theta\\) using asdfghjkl's gradient per output dimension. @@ -30,9 +29,6 @@ def jacobians(model, x, subnetwork_indices=None): model : torch.nn.Module x : torch.Tensor input data `(batch, input_shape)` on compatible device with model. - subnetwork_indices : torch.Tensor, default=None - indices of the vectorized model parameters that define the subnetwork - to apply the Laplace approximation over Returns ------- @@ -48,8 +44,8 @@ def loss_fn(outputs, targets): f = batch_gradient(model, loss_fn, x, None).detach() Jk = _get_batch_grad(model) - if subnetwork_indices is not None: - Jk = Jk[:, subnetwork_indices] + if self.subnetwork_indices is not None: + Jk = Jk[:, self.subnetwork_indices] Js.append(Jk) Js = torch.stack(Js, dim=1) return Js, f diff --git a/laplace/curvature/backpack.py b/laplace/curvature/backpack.py index 6e655944..42599729 100644 --- a/laplace/curvature/backpack.py +++ b/laplace/curvature/backpack.py @@ -16,8 +16,7 @@ def __init__(self, model, likelihood, last_layer=False, subnetwork_indices=None) extend(self._model) extend(self.lossfunc) - @staticmethod - def jacobians(model, x, subnetwork_indices=None): + def jacobians(self, model, x): """Compute Jacobians \\(\\nabla_{\\theta} f(x;\\theta)\\) at current parameter \\(\\theta\\) using backpack's BatchGrad per output dimension. 
@@ -26,9 +25,6 @@ def jacobians(model, x, subnetwork_indices=None): model : torch.nn.Module x : torch.Tensor input data `(batch, input_shape)` on compatible device with model. - subnetwork_indices : torch.Tensor, default=None - indices of the vectorized model parameters that define the subnetwork - to apply the Laplace approximation over Returns ------- @@ -52,8 +48,8 @@ def jacobians(model, x, subnetwork_indices=None): to_cat.append(param.grad_batch.detach().reshape(x.shape[0], -1)) delattr(param, 'grad_batch') Jk = torch.cat(to_cat, dim=1) - if subnetwork_indices is not None: - Jk = Jk[:, subnetwork_indices] + if self.subnetwork_indices is not None: + Jk = Jk[:, self.subnetwork_indices] to_stack.append(Jk) if i == 0: f = out.detach() diff --git a/laplace/curvature/curvature.py b/laplace/curvature/curvature.py index 735fd74d..373bf398 100644 --- a/laplace/curvature/curvature.py +++ b/laplace/curvature/curvature.py @@ -44,8 +44,7 @@ def __init__(self, model, likelihood, last_layer=False, subnetwork_indices=None) def _model(self): return self.model.last_layer if self.last_layer else self.model - @staticmethod - def jacobians(model, x, subnetwork_indices=None): + def jacobians(self, model, x): """Compute Jacobians \\(\\nabla_\\theta f(x;\\theta)\\) at current parameter \\(\\theta\\). Parameters @@ -53,9 +52,6 @@ def jacobians(model, x, subnetwork_indices=None): model : torch.nn.Module x : torch.Tensor input data `(batch, input_shape)` on compatible device with model. - subnetwork_indices : torch.Tensor, default=None - indices of the vectorized model parameters that define the subnetwork - to apply the Laplace approximation over Returns ------- @@ -66,8 +62,7 @@ def jacobians(model, x, subnetwork_indices=None): """ raise NotImplementedError - @staticmethod - def last_layer_jacobians(model, x): + def last_layer_jacobians(self, model, x): """Compute Jacobians \\(\\nabla_{\\theta_\\textrm{last}} f(x;\\theta_\\textrm{last})\\) only at current last-layer parameter \\(\\theta_{\\textrm{last}}\\). @@ -249,7 +244,7 @@ def full(self, x, y, **kwargs): if self.last_layer: Js, f = self.last_layer_jacobians(self.model, x) else: - Js, f = self.jacobians(self.model, x, self.subnetwork_indices) + Js, f = self.jacobians(self.model, x) loss, H_ggn = self._get_full_ggn(Js, f, y) return loss, H_ggn diff --git a/tests/test_jacobians.py b/tests/test_jacobians.py index 7a5a22ef..8f676db1 100644 --- a/tests/test_jacobians.py +++ b/tests/test_jacobians.py @@ -35,9 +35,10 @@ def X(): return torch.randn(200, 3) -@pytest.mark.parametrize('backend', [AsdlInterface, BackPackInterface]) -def test_linear_jacobians(linear_model, X, backend): +@pytest.mark.parametrize('backend_cls', [AsdlInterface, BackPackInterface]) +def test_linear_jacobians(linear_model, X, backend_cls): # jacobian of linear model is input X. 
+    backend = backend_cls(linear_model, 'classification')
     Js, f = backend.jacobians(linear_model, X)
     # into Jacs shape (batch_size, output_size, params)
     true_Js = X.reshape(len(X), 1, -1)
@@ -46,9 +47,10 @@ def test_linear_jacobians(linear_model, X, backend):
     assert torch.allclose(f, linear_model(X), atol=1e-5)
 
 
-@pytest.mark.parametrize('backend', [AsdlInterface, BackPackInterface])
-def test_jacobians_singleoutput(singleoutput_model, X, backend):
+@pytest.mark.parametrize('backend_cls', [AsdlInterface, BackPackInterface])
+def test_jacobians_singleoutput(singleoutput_model, X, backend_cls):
     model = singleoutput_model
+    backend = backend_cls(model, 'classification')
     Js, f = backend.jacobians(model, X)
     Js_naive, f_naive = jacobians_naive(model, X)
     assert Js.shape == Js_naive.shape
@@ -57,9 +59,10 @@ def test_jacobians_singleoutput(singleoutput_model, X, backend):
     assert torch.allclose(f, f_naive)
 
 
-@pytest.mark.parametrize('backend', [AsdlInterface, BackPackInterface])
-def test_jacobians_multioutput(multioutput_model, X, backend):
+@pytest.mark.parametrize('backend_cls', [AsdlInterface, BackPackInterface])
+def test_jacobians_multioutput(multioutput_model, X, backend_cls):
     model = multioutput_model
+    backend = backend_cls(model, 'classification')
     Js, f = backend.jacobians(model, X)
     Js_naive, f_naive = jacobians_naive(model, X)
     assert Js.shape == Js_naive.shape
@@ -68,9 +71,10 @@ def test_jacobians_multioutput(multioutput_model, X, backend):
     assert torch.allclose(f, f_naive)
 
 
-@pytest.mark.parametrize('backend', [AsdlInterface, BackPackInterface])
-def test_last_layer_jacobians_singleoutput(singleoutput_model, X, backend):
+@pytest.mark.parametrize('backend_cls', [AsdlInterface, BackPackInterface])
+def test_last_layer_jacobians_singleoutput(singleoutput_model, X, backend_cls):
     model = FeatureExtractor(singleoutput_model)
+    backend = backend_cls(model, 'classification')
     Js, f = backend.last_layer_jacobians(model, X)
     _, phi = model.forward_with_features(X)
     Js_naive, f_naive = jacobians_naive(model.last_layer, phi)
@@ -80,9 +84,10 @@ def test_last_layer_jacobians_singleoutput(singleoutput_model, X, backend):
     assert torch.allclose(f, f_naive)
 
 
-@pytest.mark.parametrize('backend', [AsdlInterface, BackPackInterface])
-def test_last_layer_jacobians_multioutput(multioutput_model, X, backend):
+@pytest.mark.parametrize('backend_cls', [AsdlInterface, BackPackInterface])
+def test_last_layer_jacobians_multioutput(multioutput_model, X, backend_cls):
     model = FeatureExtractor(multioutput_model)
+    backend = backend_cls(model, 'classification')
     Js, f = backend.last_layer_jacobians(model, X)
     _, phi = model.forward_with_features(X)
     Js_naive, f_naive = jacobians_naive(model.last_layer, phi)
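Making the Jacobians instance methods means the subnetwork indices travel with the backend object instead of being threaded through every call, as the updated tests above reflect. A small sketch of the resulting call pattern; the two-layer toy model and the index choice are placeholders, and the shapes in the comments follow from them:

import torch
from torch import nn

from laplace.curvature import BackPackGGN

model = nn.Sequential(nn.Linear(3, 20), nn.Tanh(), nn.Linear(20, 2))  # 122 parameters
X = torch.randn(5, 3)

# full network: one Jacobian column per parameter
backend = BackPackGGN(model, 'regression')
Js, f = backend.jacobians(model, X)  # Js: (5, 2, 122), f: (5, 2)

# subnetwork: columns restricted to the indices stored on the backend
sub_backend = BackPackGGN(model, 'regression',
                          subnetwork_indices=torch.LongTensor([0, 1, 2, 3]))
Js_sub, _ = sub_backend.jacobians(model, X)  # Js_sub: (5, 2, 4)

From 253012204613b699186c6ec80fc79d0f7315e24e Mon Sep 17 00:00:00 2001
From: "Erik A.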
Daxberger" Date: Fri, 10 Dec 2021 15:12:51 +0000 Subject: [PATCH 06/49] Add SubnetMask baseclass and subclasses for random, largest magnitude, and last-layer subnet masks --- laplace/subnetmask.py | 193 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 193 insertions(+) create mode 100644 laplace/subnetmask.py diff --git a/laplace/subnetmask.py b/laplace/subnetmask.py new file mode 100644 index 00000000..e85790fc --- /dev/null +++ b/laplace/subnetmask.py @@ -0,0 +1,193 @@ +import torch +from torch.nn.utils import parameters_to_vector + +from laplace.feature_extractor import FeatureExtractor + +__all__ = ['SubnetMask', 'RandomSubnetMask', 'LargestMagnitudeSubnetMask', 'LastLayerSubnetMask'] + + +class SubnetMask: + """Baseclass for all subnetwork masks in this library (for subnetwork Laplace). + + Parameters + ---------- + model : torch.nn.Module + """ + def __init__(self, model): + self.model = model + self.parameter_vector = parameters_to_vector(self.model.parameters()).detach() + self._n_params = len(self.parameter_vector) + self._device = next(self.model.parameters()).device + self._indices = None + self._n_params_subnet = None + + @property + def n_params_subnet(self): + raise NotImplementedError + + def _check_select(self): + if self._indices is None: + raise AttributeError('Subnetwork mask not selected. Run select() first.') + + @property + def indices(self): + self._check_select() + return self._indices + + def convert_subnet_mask_to_indices(self, subnet_mask): + """Converts a subnetwork mask into subnetwork indices. + + Parameters + ---------- + subnet_mask : torch.Tensor + a binary vector of size (n_params) where 1s locate the subnetwork parameters + within the vectorized model parameters + + Returns + ------- + subnet_mask_indices : torch.Tensor + a vector of indices of the vectorized model parameters that define the subnetwork + """ + if not isinstance(subnet_mask, torch.Tensor): + raise ValueError('Subnetwork mask needs to be torch.Tensor!') + elif subnet_mask.type() not in ['torch.ByteTensor', 'torch.IntTensor', 'torch.LongTensor'] or\ + len(subnet_mask.shape) != 1: + raise ValueError('Subnetwork mask needs to be 1-dimensional torch.{Byte,Int,Long}Tensor!') + elif len(subnet_mask) != self._n_params or\ + len(subnet_mask[subnet_mask == 0]) + len(subnet_mask[subnet_mask == 1]) != self._n_params: + raise ValueError('Subnetwork mask needs to be a binary vector of size (n_params) where 1s'\ + 'locate the subnetwork parameters within the vectorized model parameters!') + + subnet_mask_indices = subnet_mask.nonzero(as_tuple=True)[0] + return subnet_mask_indices + + def select(self, train_loader): + """ Select the subnetwork mask. + + Parameters + ---------- + train_loader : torch.data.utils.DataLoader + each iterate is a training batch (X, y); + `train_loader.dataset` needs to be set to access \\(N\\), size of the data set + """ + if self._indices is not None: + raise ValueError('Subnetwork mask already selected.') + + subnet_mask = self.get_subnet_mask(train_loader) + self._indices = self.convert_subnet_mask_to_indices(subnet_mask) + + def get_subnet_mask(self, train_loader): + """ Get the subnetwork mask. 
+ + Parameters + ---------- + train_loader : torch.data.utils.DataLoader + each iterate is a training batch (X, y); + `train_loader.dataset` needs to be set to access \\(N\\), size of the data set + + Returns + ------- + subnet_mask: torch.Tensor + a binary vector of size (n_params) where 1s locate the subnetwork parameters + within the vectorized model parameters + """ + raise NotImplementedError + + +class ScoreBasedSubnetMask(SubnetMask): + """Baseclass for subnetwork masks defined by selecting the top-scoring parameters according to some criterion. + + Parameters + ---------- + model : torch.nn.Module + n_params_subnet : int + the number of parameters in the subnetwork (i.e. the number of top-scoring parameters to select) + """ + def __init__(self, model, n_params_subnet): + super().__init__(model) + + if n_params_subnet is None: + raise ValueError(f'Need to pass number of subnetwork parameters when using subnetwork Laplace.') + if n_params_subnet > self._n_params: + raise ValueError(f'Subnetwork ({n_params_subnet}) cannot be larger than model ({self._n_params}).') + self._n_params_subnet = n_params_subnet + self._param_scores = None + + @property + def n_params_subnet(self): + return self._n_params_subnet + + def compute_param_scores(self, train_loader): + raise NotImplementedError + + def _check_param_scores(self): + if self._param_scores.shape != self.parameter_vector.shape: + raise ValueError('Parameter scores need to be of same shape as parameter vector.') + + def get_subnet_mask(self, train_loader): + """ Get the subnetwork mask by ranking parameters based on their scores.""" + + if self._param_scores is None: + self._param_scores = self.compute_param_scores(train_loader) + self._check_param_scores() + + idx = torch.argsort(self._param_scores, descending=True)[:self._n_params_subnet] + idx = idx.sort()[0] + subnet_mask = torch.zeros_like(self.parameter_vector).byte() + subnet_mask[idx] = 1 + return subnet_mask + + +class RandomSubnetMask(ScoreBasedSubnetMask): + """Subnetwork mask of parameters sampled uniformly at random.""" + def compute_param_scores(self, train_loader): + return torch.rand_like(self.parameter_vector) + + +class LargestMagnitudeSubnetMask(ScoreBasedSubnetMask): + """Subnetwork mask identifying the parameters with the largest magnitude. """ + def compute_param_scores(self, train_loader): + return self.parameter_vector + + +class LastLayerSubnetMask(SubnetMask): + """Subnetwork mask corresponding to the last layer of the neural network. 
+ + Parameters + ---------- + model : torch.nn.Module + last_layer_name: str, default=None + name of the model's last layer, if None it will be determined automatically + """ + def __init__(self, model, last_layer_name=None): + super().__init__(model) + self.model = FeatureExtractor(self.model, last_layer_name=last_layer_name) + self._n_params_subnet = None + + @property + def n_params_subnet(self): + if self._n_params_subnet is None: + self._check_select() + self._n_params_subnet = torch.count_nonzero(self._indices).item() + return self._n_params_subnet + + def get_subnet_mask(self, train_loader): + """ Get the subnetwork mask identifying the last layer.""" + + self.model.eval() + if self.model.last_layer is None: + X, _ = next(iter(train_loader)) + with torch.no_grad(): + self.model.find_last_layer(X[:1].to(self._device)) + + subnet_mask_list = [] + for name, layer in self.model.model.named_modules(): + if len(list(layer.children())) > 0: + continue + if name == self.model._last_layer_name: + mask_method = torch.ones_like + else: + mask_method = torch.zeros_like + subnet_mask_list.append(mask_method(parameters_to_vector(layer.parameters()))) + subnet_mask = torch.cat(subnet_mask_list).byte() + return subnet_mask From c0be3f92c5fc8f5bc041c3a44f555e6928e5fcab Mon Sep 17 00:00:00 2001 From: "Erik A. Daxberger" Date: Fri, 10 Dec 2021 15:13:47 +0000 Subject: [PATCH 07/49] Adapt FullSubnetLaplace to use new SubnetMask class interface --- laplace/subnetlaplace.py | 66 +++++++++++++++------------------------- 1 file changed, 25 insertions(+), 41 deletions(-) diff --git a/laplace/subnetlaplace.py b/laplace/subnetlaplace.py index 9749c1f6..01d8735e 100644 --- a/laplace/subnetlaplace.py +++ b/laplace/subnetlaplace.py @@ -41,7 +41,7 @@ class FullSubnetLaplace(FullLaplace): model : torch.nn.Module or `laplace.feature_extractor.FeatureExtractor` likelihood : {'classification', 'regression'} determines the log likelihood Hessian approximation - subnetwork_mask : torch.Tensor, default=None + subnetwork_mask : subclasses of `laplace.subnetmask.SubnetMask`, default=None mask defining the subnetwork to apply the Laplace approximation over sigma_noise : torch.Tensor or float, default=1 observation noise for the regression setting; must be 1 for classification @@ -58,58 +58,24 @@ class FullSubnetLaplace(FullLaplace): backend_kwargs : dict, default=None arguments passed to the backend on initialization, for example to set the number of MC samples for stochastic approximations. + subnetmask_kwargs : dict, default=None + arguments passed to the subnetwork mask on initialization. 
""" # key to map to correct subclass of BaseLaplace, (subset of weights, Hessian structure) _key = ('subnetwork', 'full') def __init__(self, model, likelihood, subnetwork_mask=None, sigma_noise=1., prior_precision=1., - prior_mean=0., temperature=1., backend=BackPackGGN, backend_kwargs=None): + prior_mean=0., temperature=1., backend=BackPackGGN, backend_kwargs=None, subnetmask_kwargs=None): super().__init__(model, likelihood, sigma_noise=sigma_noise, prior_precision=prior_precision, prior_mean=prior_mean, temperature=temperature, backend=backend, backend_kwargs=backend_kwargs) - self.subnetwork_mask = subnetwork_mask - self.n_params_subnet = len(self.subnetwork_mask) + self._subnetmask_kwargs = dict() if subnetmask_kwargs is None else subnetmask_kwargs + self.subnetwork_mask = subnetwork_mask(self.model, **self._subnetmask_kwargs) + self.n_params_subnet = None def _init_H(self): self.H = torch.zeros(self.n_params_subnet, self.n_params_subnet, device=self._device) - @property - def subnetwork_mask(self): - return self._subnetwork_mask - - @subnetwork_mask.setter - def subnetwork_mask(self, subnetwork_mask): - """Check validity of subnetwork mask and convert it to a vector of indices of the vectorized - model parameters that define the subnetwork to apply the Laplace approximation over. - """ - if isinstance(subnetwork_mask, torch.Tensor): - if subnetwork_mask.type() not in ['torch.ByteTensor', 'torch.IntTensor', 'torch.LongTensor'] or\ - len(subnetwork_mask.shape) != 1: - raise ValueError('Subnetwork mask needs to be 1-dimensional torch.{Byte,Int,Long}Tensor!') - - elif len(subnetwork_mask) == self.n_params and\ - len(subnetwork_mask[subnetwork_mask == 0]) +\ - len(subnetwork_mask[subnetwork_mask == 1]) == self.n_params: - self._subnetwork_mask = subnetwork_mask.nonzero(as_tuple=True)[0] - - elif len(subnetwork_mask) <= self.n_params and\ - len(subnetwork_mask[subnetwork_mask >= self.n_params]) == 0: - self._subnetwork_mask = subnetwork_mask - - else: - raise ValueError('Subnetwork mask needs to identify the subnetwork parameters '\ - 'from the vectorized model parameters as:\n'\ - '1) a vector of indices of the subnetwork parameters, or\n'\ - '2) a binary vector of size (parameters) where 1s locate the subnetwork parameters.') - - elif subnetwork_mask is None: - raise ValueError('Subnetwork Laplace requires passing a subnetwork mask!') - - else: - raise ValueError('Subnetwork mask needs to be torch.Tensor!') - - self.backend.subnetwork_indices = self._subnetwork_mask - @property def prior_precision_diag(self): """Obtain the diagonal prior precision \\(p_0\\) constructed from either @@ -127,3 +93,21 @@ def prior_precision_diag(self): else: raise ValueError('Mismatch of prior and model. Diagonal or scalar prior.') + + def fit(self, train_loader): + """Fit the local Laplace approximation at the parameters of the subnetwork. + + Parameters + ---------- + train_loader : torch.data.utils.DataLoader + each iterate is a training batch (X, y); + `train_loader.dataset` needs to be set to access \\(N\\), size of the data set + """ + + # select subnetwork and pass it to backend + self.subnetwork_mask.select(train_loader) + self.backend.subnetwork_indices = self.subnetwork_mask.indices + self.n_params_subnet = self.subnetwork_mask.n_params_subnet + + # fit Laplace approximation over subnetwork + super().fit(train_loader) From 257d33bd775f993b4f63dc61a1a40135cb572713 Mon Sep 17 00:00:00 2001 From: "Erik A. 
Daxberger" Date: Fri, 10 Dec 2021 15:54:47 +0000 Subject: [PATCH 08/49] Add support for largest variance subnet selection (using diagonal Laplace) --- laplace/subnetlaplace.py | 7 ++++++- laplace/subnetmask.py | 25 +++++++++++++++++++++++-- 2 files changed, 29 insertions(+), 3 deletions(-) diff --git a/laplace/subnetlaplace.py b/laplace/subnetlaplace.py index 01d8735e..f67403be 100644 --- a/laplace/subnetlaplace.py +++ b/laplace/subnetlaplace.py @@ -1,8 +1,9 @@ import torch -from laplace.baselaplace import FullLaplace +from laplace.baselaplace import FullLaplace, DiagLaplace from laplace.curvature import BackPackGGN +from laplace.subnetmask import LargestVarianceDiagLaplaceSubnetMask __all__ = ['FullSubnetLaplace'] @@ -70,6 +71,10 @@ def __init__(self, model, likelihood, subnetwork_mask=None, sigma_noise=1., prio prior_mean=prior_mean, temperature=temperature, backend=backend, backend_kwargs=backend_kwargs) self._subnetmask_kwargs = dict() if subnetmask_kwargs is None else subnetmask_kwargs + if subnetwork_mask == LargestVarianceDiagLaplaceSubnetMask: + # instantiate and pass diagonal Laplace model for largest variance subnetwork selection + self._subnetmask_kwargs.update(diag_laplace_model=DiagLaplace(self.model, likelihood, sigma_noise, + prior_precision, prior_mean, temperature, backend, backend_kwargs)) self.subnetwork_mask = subnetwork_mask(self.model, **self._subnetmask_kwargs) self.n_params_subnet = None diff --git a/laplace/subnetmask.py b/laplace/subnetmask.py index e85790fc..b32a7ba4 100644 --- a/laplace/subnetmask.py +++ b/laplace/subnetmask.py @@ -3,7 +3,7 @@ from laplace.feature_extractor import FeatureExtractor -__all__ = ['SubnetMask', 'RandomSubnetMask', 'LargestMagnitudeSubnetMask', 'LastLayerSubnetMask'] +__all__ = ['SubnetMask', 'RandomSubnetMask', 'LargestMagnitudeSubnetMask', 'LastLayerSubnetMask', 'LargestVarianceDiagLaplaceSubnetMask'] class SubnetMask: @@ -125,7 +125,7 @@ def _check_param_scores(self): raise ValueError('Parameter scores need to be of same shape as parameter vector.') def get_subnet_mask(self, train_loader): - """ Get the subnetwork mask by ranking parameters based on their scores.""" + """ Get the subnetwork mask by (descendingly) ranking parameters based on their scores.""" if self._param_scores is None: self._param_scores = self.compute_param_scores(train_loader) @@ -150,6 +150,27 @@ def compute_param_scores(self, train_loader): return self.parameter_vector +class LargestVarianceDiagLaplaceSubnetMask(ScoreBasedSubnetMask): + """Subnetwork mask identifying the parameters with the largest marginal variances + (estimated using a diagional Laplace approximation over all model parameters). + + Parameters + ---------- + model : torch.nn.Module + n_params_subnet : int + the number of parameters in the subnetwork (i.e. the number of top-scoring parameters to select) + diag_laplace_model : `laplace.baselaplace.DiagLaplace` + diagonal Laplace model to use for variance estimation + """ + def __init__(self, model, n_params_subnet, diag_laplace_model): + super().__init__(model, n_params_subnet) + self.diag_laplace_model = diag_laplace_model + + def compute_param_scores(self, train_loader): + self.diag_laplace_model.fit(train_loader) + return self.diag_laplace_model.posterior_variance + + class LastLayerSubnetMask(SubnetMask): """Subnetwork mask corresponding to the last layer of the neural network. 
From 3771e94920c2e53c700dbc47115a706604df220b Mon Sep 17 00:00:00 2001 From: Alex Immer Date: Fri, 10 Dec 2021 17:18:24 +0100 Subject: [PATCH 09/49] Remove change --- laplace/curvature/asdl.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/laplace/curvature/asdl.py b/laplace/curvature/asdl.py index 8bdc19ca..d65c4334 100644 --- a/laplace/curvature/asdl.py +++ b/laplace/curvature/asdl.py @@ -18,10 +18,6 @@ class AsdlInterface(CurvatureInterface): """Interface for asdfghjkl backend. """ - def __init__(self, model, likelihood, last_layer=False, subnetwork_indices=None): - if likelihood != 'classification': - raise ValueError('This backend only supports classification currently.') - super().__init__(model, likelihood, last_layer, subnetwork_indices) def jacobians(self, model, x): """Compute Jacobians \\(\\nabla_\\theta f(x;\\theta)\\) at current parameter \\(\\theta\\) From 38cd0f67620df75398e4c1bbc0c6df8f7ef386fd Mon Sep 17 00:00:00 2001 From: "Erik A. Daxberger" Date: Fri, 10 Dec 2021 16:21:32 +0000 Subject: [PATCH 10/49] Change FullSubnetLaplace to SubnetLaplace as it's the only option --- laplace/__init__.py | 4 ++-- laplace/subnetlaplace.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/laplace/__init__.py b/laplace/__init__.py index d092d50a..429b9f0d 100644 --- a/laplace/__init__.py +++ b/laplace/__init__.py @@ -9,7 +9,7 @@ from laplace.baselaplace import BaseLaplace, ParametricLaplace, FullLaplace, KronLaplace, DiagLaplace, LowRankLaplace from laplace.lllaplace import LLLaplace, FullLLLaplace, KronLLLaplace, DiagLLLaplace -from laplace.subnetlaplace import FullSubnetLaplace +from laplace.subnetlaplace import SubnetLaplace from laplace.laplace import Laplace from laplace.marglik_training import marglik_training @@ -18,5 +18,5 @@ 'FullLaplace', 'KronLaplace', 'DiagLaplace', 'LowRankLaplace', # all-weights 'LLLaplace', # base-class last-layer 'FullLLLaplace', 'KronLLLaplace', 'DiagLLLaplace', # last-layer - 'FullSubnetLaplace', # subnetwork + 'SubnetLaplace', # subnetwork 'marglik_training'] # methods diff --git a/laplace/subnetlaplace.py b/laplace/subnetlaplace.py index f67403be..90cd8aeb 100644 --- a/laplace/subnetlaplace.py +++ b/laplace/subnetlaplace.py @@ -6,10 +6,10 @@ from laplace.subnetmask import LargestVarianceDiagLaplaceSubnetMask -__all__ = ['FullSubnetLaplace'] +__all__ = ['SubnetLaplace'] -class FullSubnetLaplace(FullLaplace): +class SubnetLaplace(FullLaplace): """Class for subnetwork Laplace, which computes the Laplace approximation over just a subset of the model parameters (i.e. a subnetwork within the neural network). Subnetwork Laplace only supports a full Hessian approximation; other Hessian From f933dad8ec6137e2a0921b6dd8d1baac854c7555 Mon Sep 17 00:00:00 2001 From: "Erik A. Daxberger" Date: Fri, 10 Dec 2021 16:25:00 +0000 Subject: [PATCH 11/49] Convert indentation from tabs to spaces --- laplace/subnetmask.py | 314 +++++++++++++++++++++--------------------- 1 file changed, 157 insertions(+), 157 deletions(-) diff --git a/laplace/subnetmask.py b/laplace/subnetmask.py index b32a7ba4..468734d4 100644 --- a/laplace/subnetmask.py +++ b/laplace/subnetmask.py @@ -7,77 +7,77 @@ class SubnetMask: - """Baseclass for all subnetwork masks in this library (for subnetwork Laplace). 
- - Parameters - ---------- - model : torch.nn.Module - """ - def __init__(self, model): - self.model = model - self.parameter_vector = parameters_to_vector(self.model.parameters()).detach() - self._n_params = len(self.parameter_vector) - self._device = next(self.model.parameters()).device - self._indices = None - self._n_params_subnet = None - - @property - def n_params_subnet(self): - raise NotImplementedError - - def _check_select(self): - if self._indices is None: - raise AttributeError('Subnetwork mask not selected. Run select() first.') - - @property - def indices(self): - self._check_select() - return self._indices - - def convert_subnet_mask_to_indices(self, subnet_mask): - """Converts a subnetwork mask into subnetwork indices. - - Parameters - ---------- - subnet_mask : torch.Tensor - a binary vector of size (n_params) where 1s locate the subnetwork parameters - within the vectorized model parameters - - Returns - ------- - subnet_mask_indices : torch.Tensor - a vector of indices of the vectorized model parameters that define the subnetwork - """ - if not isinstance(subnet_mask, torch.Tensor): - raise ValueError('Subnetwork mask needs to be torch.Tensor!') - elif subnet_mask.type() not in ['torch.ByteTensor', 'torch.IntTensor', 'torch.LongTensor'] or\ + """Baseclass for all subnetwork masks in this library (for subnetwork Laplace). + + Parameters + ---------- + model : torch.nn.Module + """ + def __init__(self, model): + self.model = model + self.parameter_vector = parameters_to_vector(self.model.parameters()).detach() + self._n_params = len(self.parameter_vector) + self._device = next(self.model.parameters()).device + self._indices = None + self._n_params_subnet = None + + @property + def n_params_subnet(self): + raise NotImplementedError + + def _check_select(self): + if self._indices is None: + raise AttributeError('Subnetwork mask not selected. Run select() first.') + + @property + def indices(self): + self._check_select() + return self._indices + + def convert_subnet_mask_to_indices(self, subnet_mask): + """Converts a subnetwork mask into subnetwork indices. 
+ + Parameters + ---------- + subnet_mask : torch.Tensor + a binary vector of size (n_params) where 1s locate the subnetwork parameters + within the vectorized model parameters + + Returns + ------- + subnet_mask_indices : torch.Tensor + a vector of indices of the vectorized model parameters that define the subnetwork + """ + if not isinstance(subnet_mask, torch.Tensor): + raise ValueError('Subnetwork mask needs to be torch.Tensor!') + elif subnet_mask.type() not in ['torch.ByteTensor', 'torch.IntTensor', 'torch.LongTensor'] or\ len(subnet_mask.shape) != 1: - raise ValueError('Subnetwork mask needs to be 1-dimensional torch.{Byte,Int,Long}Tensor!') - elif len(subnet_mask) != self._n_params or\ + raise ValueError('Subnetwork mask needs to be 1-dimensional torch.{Byte,Int,Long}Tensor!') + elif len(subnet_mask) != self._n_params or\ len(subnet_mask[subnet_mask == 0]) + len(subnet_mask[subnet_mask == 1]) != self._n_params: - raise ValueError('Subnetwork mask needs to be a binary vector of size (n_params) where 1s'\ - 'locate the subnetwork parameters within the vectorized model parameters!') + raise ValueError('Subnetwork mask needs to be a binary vector of size (n_params) where 1s'\ + 'locate the subnetwork parameters within the vectorized model parameters!') - subnet_mask_indices = subnet_mask.nonzero(as_tuple=True)[0] - return subnet_mask_indices + subnet_mask_indices = subnet_mask.nonzero(as_tuple=True)[0] + return subnet_mask_indices - def select(self, train_loader): - """ Select the subnetwork mask. + def select(self, train_loader): + """ Select the subnetwork mask. Parameters ---------- train_loader : torch.data.utils.DataLoader each iterate is a training batch (X, y); `train_loader.dataset` needs to be set to access \\(N\\), size of the data set - """ - if self._indices is not None: - raise ValueError('Subnetwork mask already selected.') + """ + if self._indices is not None: + raise ValueError('Subnetwork mask already selected.') - subnet_mask = self.get_subnet_mask(train_loader) - self._indices = self.convert_subnet_mask_to_indices(subnet_mask) + subnet_mask = self.get_subnet_mask(train_loader) + self._indices = self.convert_subnet_mask_to_indices(subnet_mask) - def get_subnet_mask(self, train_loader): - """ Get the subnetwork mask. + def get_subnet_mask(self, train_loader): + """ Get the subnetwork mask. Parameters ---------- @@ -85,130 +85,130 @@ def get_subnet_mask(self, train_loader): each iterate is a training batch (X, y); `train_loader.dataset` needs to be set to access \\(N\\), size of the data set - Returns - ------- - subnet_mask: torch.Tensor - a binary vector of size (n_params) where 1s locate the subnetwork parameters - within the vectorized model parameters - """ - raise NotImplementedError + Returns + ------- + subnet_mask: torch.Tensor + a binary vector of size (n_params) where 1s locate the subnetwork parameters + within the vectorized model parameters + """ + raise NotImplementedError class ScoreBasedSubnetMask(SubnetMask): - """Baseclass for subnetwork masks defined by selecting the top-scoring parameters according to some criterion. + """Baseclass for subnetwork masks defined by selecting the top-scoring parameters according to some criterion. - Parameters - ---------- - model : torch.nn.Module - n_params_subnet : int - the number of parameters in the subnetwork (i.e. 
the number of top-scoring parameters to select) - """ - def __init__(self, model, n_params_subnet): - super().__init__(model) + Parameters + ---------- + model : torch.nn.Module + n_params_subnet : int + the number of parameters in the subnetwork (i.e. the number of top-scoring parameters to select) + """ + def __init__(self, model, n_params_subnet): + super().__init__(model) - if n_params_subnet is None: - raise ValueError(f'Need to pass number of subnetwork parameters when using subnetwork Laplace.') - if n_params_subnet > self._n_params: - raise ValueError(f'Subnetwork ({n_params_subnet}) cannot be larger than model ({self._n_params}).') - self._n_params_subnet = n_params_subnet - self._param_scores = None + if n_params_subnet is None: + raise ValueError(f'Need to pass number of subnetwork parameters when using subnetwork Laplace.') + if n_params_subnet > self._n_params: + raise ValueError(f'Subnetwork ({n_params_subnet}) cannot be larger than model ({self._n_params}).') + self._n_params_subnet = n_params_subnet + self._param_scores = None - @property - def n_params_subnet(self): - return self._n_params_subnet + @property + def n_params_subnet(self): + return self._n_params_subnet - def compute_param_scores(self, train_loader): - raise NotImplementedError + def compute_param_scores(self, train_loader): + raise NotImplementedError - def _check_param_scores(self): - if self._param_scores.shape != self.parameter_vector.shape: - raise ValueError('Parameter scores need to be of same shape as parameter vector.') + def _check_param_scores(self): + if self._param_scores.shape != self.parameter_vector.shape: + raise ValueError('Parameter scores need to be of same shape as parameter vector.') - def get_subnet_mask(self, train_loader): - """ Get the subnetwork mask by (descendingly) ranking parameters based on their scores.""" + def get_subnet_mask(self, train_loader): + """ Get the subnetwork mask by (descendingly) ranking parameters based on their scores.""" - if self._param_scores is None: - self._param_scores = self.compute_param_scores(train_loader) - self._check_param_scores() + if self._param_scores is None: + self._param_scores = self.compute_param_scores(train_loader) + self._check_param_scores() - idx = torch.argsort(self._param_scores, descending=True)[:self._n_params_subnet] - idx = idx.sort()[0] - subnet_mask = torch.zeros_like(self.parameter_vector).byte() - subnet_mask[idx] = 1 - return subnet_mask + idx = torch.argsort(self._param_scores, descending=True)[:self._n_params_subnet] + idx = idx.sort()[0] + subnet_mask = torch.zeros_like(self.parameter_vector).byte() + subnet_mask[idx] = 1 + return subnet_mask class RandomSubnetMask(ScoreBasedSubnetMask): - """Subnetwork mask of parameters sampled uniformly at random.""" - def compute_param_scores(self, train_loader): - return torch.rand_like(self.parameter_vector) + """Subnetwork mask of parameters sampled uniformly at random.""" + def compute_param_scores(self, train_loader): + return torch.rand_like(self.parameter_vector) class LargestMagnitudeSubnetMask(ScoreBasedSubnetMask): - """Subnetwork mask identifying the parameters with the largest magnitude. """ - def compute_param_scores(self, train_loader): - return self.parameter_vector + """Subnetwork mask identifying the parameters with the largest magnitude. 
""" + def compute_param_scores(self, train_loader): + return self.parameter_vector class LargestVarianceDiagLaplaceSubnetMask(ScoreBasedSubnetMask): - """Subnetwork mask identifying the parameters with the largest marginal variances - (estimated using a diagional Laplace approximation over all model parameters). - - Parameters - ---------- - model : torch.nn.Module - n_params_subnet : int - the number of parameters in the subnetwork (i.e. the number of top-scoring parameters to select) + """Subnetwork mask identifying the parameters with the largest marginal variances + (estimated using a diagional Laplace approximation over all model parameters). + + Parameters + ---------- + model : torch.nn.Module + n_params_subnet : int + the number of parameters in the subnetwork (i.e. the number of top-scoring parameters to select) diag_laplace_model : `laplace.baselaplace.DiagLaplace` diagonal Laplace model to use for variance estimation - """ - def __init__(self, model, n_params_subnet, diag_laplace_model): - super().__init__(model, n_params_subnet) - self.diag_laplace_model = diag_laplace_model + """ + def __init__(self, model, n_params_subnet, diag_laplace_model): + super().__init__(model, n_params_subnet) + self.diag_laplace_model = diag_laplace_model - def compute_param_scores(self, train_loader): - self.diag_laplace_model.fit(train_loader) - return self.diag_laplace_model.posterior_variance + def compute_param_scores(self, train_loader): + self.diag_laplace_model.fit(train_loader) + return self.diag_laplace_model.posterior_variance class LastLayerSubnetMask(SubnetMask): - """Subnetwork mask corresponding to the last layer of the neural network. + """Subnetwork mask corresponding to the last layer of the neural network. - Parameters - ---------- - model : torch.nn.Module + Parameters + ---------- + model : torch.nn.Module last_layer_name: str, default=None name of the model's last layer, if None it will be determined automatically - """ - def __init__(self, model, last_layer_name=None): - super().__init__(model) - self.model = FeatureExtractor(self.model, last_layer_name=last_layer_name) - self._n_params_subnet = None - - @property - def n_params_subnet(self): - if self._n_params_subnet is None: - self._check_select() - self._n_params_subnet = torch.count_nonzero(self._indices).item() - return self._n_params_subnet - - def get_subnet_mask(self, train_loader): - """ Get the subnetwork mask identifying the last layer.""" - - self.model.eval() - if self.model.last_layer is None: - X, _ = next(iter(train_loader)) - with torch.no_grad(): - self.model.find_last_layer(X[:1].to(self._device)) - - subnet_mask_list = [] - for name, layer in self.model.model.named_modules(): - if len(list(layer.children())) > 0: - continue - if name == self.model._last_layer_name: - mask_method = torch.ones_like - else: - mask_method = torch.zeros_like - subnet_mask_list.append(mask_method(parameters_to_vector(layer.parameters()))) - subnet_mask = torch.cat(subnet_mask_list).byte() - return subnet_mask + """ + def __init__(self, model, last_layer_name=None): + super().__init__(model) + self.model = FeatureExtractor(self.model, last_layer_name=last_layer_name) + self._n_params_subnet = None + + @property + def n_params_subnet(self): + if self._n_params_subnet is None: + self._check_select() + self._n_params_subnet = torch.count_nonzero(self._indices).item() + return self._n_params_subnet + + def get_subnet_mask(self, train_loader): + """ Get the subnetwork mask identifying the last layer.""" + + self.model.eval() + if 
self.model.last_layer is None: + X, _ = next(iter(train_loader)) + with torch.no_grad(): + self.model.find_last_layer(X[:1].to(self._device)) + + subnet_mask_list = [] + for name, layer in self.model.model.named_modules(): + if len(list(layer.children())) > 0: + continue + if name == self.model._last_layer_name: + mask_method = torch.ones_like + else: + mask_method = torch.zeros_like + subnet_mask_list.append(mask_method(parameters_to_vector(layer.parameters()))) + subnet_mask = torch.cat(subnet_mask_list).byte() + return subnet_mask From ac4542c47489af328191e98dc434864e0b202367 Mon Sep 17 00:00:00 2001 From: "Erik A. Daxberger" Date: Fri, 10 Dec 2021 16:37:26 +0000 Subject: [PATCH 12/49] Remove model as argument from jacobians() as it has access to self.model now (same for last_layer_jacobians) --- laplace/baselaplace.py | 2 +- laplace/curvature/asdl.py | 9 ++++----- laplace/curvature/backpack.py | 5 ++--- laplace/curvature/curvature.py | 14 ++++++-------- laplace/lllaplace.py | 2 +- tests/test_jacobians.py | 10 +++++----- tests/test_lllaplace.py | 2 +- 7 files changed, 20 insertions(+), 24 deletions(-) diff --git a/laplace/baselaplace.py b/laplace/baselaplace.py index 47449350..df2d2c27 100644 --- a/laplace/baselaplace.py +++ b/laplace/baselaplace.py @@ -592,7 +592,7 @@ def predictive_samples(self, x, pred_type='glm', n_samples=100): @torch.enable_grad() def _glm_predictive_distribution(self, X): - Js, f_mu = self.backend.jacobians(self.model, X) + Js, f_mu = self.backend.jacobians(X) f_var = self.functional_variance(Js) return f_mu.detach(), f_var.detach() diff --git a/laplace/curvature/asdl.py b/laplace/curvature/asdl.py index d65c4334..25dcdcc9 100644 --- a/laplace/curvature/asdl.py +++ b/laplace/curvature/asdl.py @@ -19,13 +19,12 @@ class AsdlInterface(CurvatureInterface): """Interface for asdfghjkl backend. """ - def jacobians(self, model, x): + def jacobians(self, x): """Compute Jacobians \\(\\nabla_\\theta f(x;\\theta)\\) at current parameter \\(\\theta\\) using asdfghjkl's gradient per output dimension. Parameters ---------- - model : torch.nn.Module x : torch.Tensor input data `(batch, input_shape)` on compatible device with model. @@ -37,12 +36,12 @@ def jacobians(self, model, x): output function `(batch, outputs)` """ Js = list() - for i in range(model.output_size): + for i in range(self.model.output_size): def loss_fn(outputs, targets): return outputs[:, i].sum() - f = batch_gradient(model, loss_fn, x, None).detach() - Jk = _get_batch_grad(model) + f = batch_gradient(self.model, loss_fn, x, None).detach() + Jk = _get_batch_grad(self.model) if self.subnetwork_indices is not None: Jk = Jk[:, self.subnetwork_indices] Js.append(Jk) diff --git a/laplace/curvature/backpack.py b/laplace/curvature/backpack.py index 42599729..a0885800 100644 --- a/laplace/curvature/backpack.py +++ b/laplace/curvature/backpack.py @@ -16,13 +16,12 @@ def __init__(self, model, likelihood, last_layer=False, subnetwork_indices=None) extend(self._model) extend(self.lossfunc) - def jacobians(self, model, x): + def jacobians(self, x): """Compute Jacobians \\(\\nabla_{\\theta} f(x;\\theta)\\) at current parameter \\(\\theta\\) using backpack's BatchGrad per output dimension. Parameters ---------- - model : torch.nn.Module x : torch.Tensor input data `(batch, input_shape)` on compatible device with model. 
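The pattern of this change at call sites: `jacobians` and `last_layer_jacobians` now read the network from `self.model`, so the explicit model argument disappears. A minimal before/after sketch, where `backend` stands for any instantiated `CurvatureInterface` subclass:

    # before this patch, the model had to be passed explicitly:
    #   Js, f = backend.jacobians(model, X)
    # after this patch, the backend uses the model it was constructed with:
    Js, f = backend.jacobians(X)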
@@ -33,7 +32,7 @@ def jacobians(self, model, x): f : torch.Tensor output function `(batch, outputs)` """ - model = extend(model) + model = extend(self.model) to_stack = [] for i in range(model.output_size): model.zero_grad() diff --git a/laplace/curvature/curvature.py b/laplace/curvature/curvature.py index 47d87730..98b703b7 100644 --- a/laplace/curvature/curvature.py +++ b/laplace/curvature/curvature.py @@ -44,12 +44,11 @@ def __init__(self, model, likelihood, last_layer=False, subnetwork_indices=None) def _model(self): return self.model.last_layer if self.last_layer else self.model - def jacobians(self, model, x): + def jacobians(self, x): """Compute Jacobians \\(\\nabla_\\theta f(x;\\theta)\\) at current parameter \\(\\theta\\). Parameters ---------- - model : torch.nn.Module x : torch.Tensor input data `(batch, input_shape)` on compatible device with model. @@ -62,13 +61,12 @@ def jacobians(self, model, x): """ raise NotImplementedError - def last_layer_jacobians(self, model, x): + def last_layer_jacobians(self, x): """Compute Jacobians \\(\\nabla_{\\theta_\\textrm{last}} f(x;\\theta_\\textrm{last})\\) only at current last-layer parameter \\(\\theta_{\\textrm{last}}\\). Parameters ---------- - model : laplace.feature_extractor.FeatureExtractor x : torch.Tensor Returns @@ -78,7 +76,7 @@ def last_layer_jacobians(self, model, x): f : torch.Tensor output function `(batch, outputs)` """ - f, phi = model.forward_with_features(x) + f, phi = self.model.forward_with_features(x) bsize = phi.shape[0] output_size = f.shape[-1] @@ -86,7 +84,7 @@ def last_layer_jacobians(self, model, x): identity = torch.eye(output_size, device=x.device).unsqueeze(0).tile(bsize, 1, 1) # Jacobians are batch x output x params Js = torch.einsum('kp,kij->kijp', phi, identity).reshape(bsize, output_size, -1) - if model.last_layer.bias is not None: + if self.model.last_layer.bias is not None: Js = torch.cat([Js, identity], dim=2) return Js, f.detach() @@ -242,9 +240,9 @@ def full(self, x, y, **kwargs): raise ValueError('Stochastic approximation not implemented for full GGN.') if self.last_layer: - Js, f = self.last_layer_jacobians(self.model, x) + Js, f = self.last_layer_jacobians(x) else: - Js, f = self.jacobians(self.model, x) + Js, f = self.jacobians(x) loss, H_ggn = self._get_full_ggn(Js, f, y) return loss, H_ggn diff --git a/laplace/lllaplace.py b/laplace/lllaplace.py index 8336e670..8053e519 100644 --- a/laplace/lllaplace.py +++ b/laplace/lllaplace.py @@ -115,7 +115,7 @@ def fit(self, train_loader, override=True): self.mean = parameters_to_vector(self.model.last_layer.parameters()).detach() def _glm_predictive_distribution(self, X): - Js, f_mu = self.backend.last_layer_jacobians(self.model, X) + Js, f_mu = self.backend.last_layer_jacobians(X) f_var = self.functional_variance(Js) return f_mu.detach(), f_var.detach() diff --git a/tests/test_jacobians.py b/tests/test_jacobians.py index 8f676db1..45cd2f37 100644 --- a/tests/test_jacobians.py +++ b/tests/test_jacobians.py @@ -39,7 +39,7 @@ def X(): def test_linear_jacobians(linear_model, X, backend_cls): # jacobian of linear model is input X. 
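For intuition behind this check: the single-output linear model here computes \(f(x) = w^\top x\), so \(\partial f / \partial w_j = x_j\); the Jacobian with respect to the flattened weights is exactly the input, which is why `true_Js` below is just `X` reshaped to `(batch_size, 1, n_params)`.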
backend = backend_cls(linear_model, 'classification') - Js, f = backend.jacobians(linear_model, X) + Js, f = backend.jacobians(X) # into Jacs shape (batch_size, output_size, params) true_Js = X.reshape(len(X), 1, -1) assert true_Js.shape == Js.shape @@ -51,7 +51,7 @@ def test_linear_jacobians(linear_model, X, backend_cls): def test_jacobians_singleoutput(singleoutput_model, X, backend_cls): model = singleoutput_model backend = backend_cls(model, 'classification') - Js, f = backend.jacobians(model, X) + Js, f = backend.jacobians(X) Js_naive, f_naive = jacobians_naive(model, X) assert Js.shape == Js_naive.shape assert torch.abs(Js-Js_naive).max() < 1e-6 @@ -63,7 +63,7 @@ def test_jacobians_singleoutput(singleoutput_model, X, backend_cls): def test_jacobians_multioutput(multioutput_model, X, backend_cls): model = multioutput_model backend = backend_cls(model, 'classification') - Js, f = backend.jacobians(model, X) + Js, f = backend.jacobians(X) Js_naive, f_naive = jacobians_naive(model, X) assert Js.shape == Js_naive.shape assert torch.abs(Js-Js_naive).max() < 1e-6 @@ -75,7 +75,7 @@ def test_jacobians_multioutput(multioutput_model, X, backend_cls): def test_last_layer_jacobians_singleoutput(singleoutput_model, X, backend_cls): model = FeatureExtractor(singleoutput_model) backend = backend_cls(model, 'classification') - Js, f = backend.last_layer_jacobians(model, X) + Js, f = backend.last_layer_jacobians(X) _, phi = model.forward_with_features(X) Js_naive, f_naive = jacobians_naive(model.last_layer, phi) assert Js.shape == Js_naive.shape @@ -88,7 +88,7 @@ def test_last_layer_jacobians_singleoutput(singleoutput_model, X, backend_cls): def test_last_layer_jacobians_multioutput(multioutput_model, X, backend_cls): model = FeatureExtractor(multioutput_model) backend = backend_cls(model, 'classification') - Js, f = backend.last_layer_jacobians(model, X) + Js, f = backend.last_layer_jacobians(X) _, phi = model.forward_with_features(X) Js_naive, f_naive = jacobians_naive(model.last_layer, phi) assert Js.shape == Js_naive.shape diff --git a/tests/test_lllaplace.py b/tests/test_lllaplace.py index 8b565687..40068d15 100644 --- a/tests/test_lllaplace.py +++ b/tests/test_lllaplace.py @@ -261,7 +261,7 @@ def test_laplace_functionality(laplace, lh, model, reg_loader, class_loader): Js, f = jacobians_naive(feature_extractor.last_layer, phi) true_f_var = torch.einsum('mkp,pq,mcq->mkc', Js, Sigma, Js) # test last-layer Jacobians - comp_Js, comp_f = lap.backend.last_layer_jacobians(lap.model, X) + comp_Js, comp_f = lap.backend.last_layer_jacobians(X) assert torch.allclose(Js, comp_Js) assert torch.allclose(f, comp_f) comp_f_var = lap.functional_variance(comp_Js) From 6a88c8254e16beb1cf7e9133b876c3dad2b8a984 Mon Sep 17 00:00:00 2001 From: "Erik A. 
Daxberger" Date: Fri, 10 Dec 2021 20:42:50 +0000 Subject: [PATCH 13/49] Minor fixes for SubnetLaplace --- laplace/subnetlaplace.py | 2 +- laplace/subnetmask.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/laplace/subnetlaplace.py b/laplace/subnetlaplace.py index 90cd8aeb..67c6eb0e 100644 --- a/laplace/subnetlaplace.py +++ b/laplace/subnetlaplace.py @@ -65,7 +65,7 @@ class SubnetLaplace(FullLaplace): # key to map to correct subclass of BaseLaplace, (subset of weights, Hessian structure) _key = ('subnetwork', 'full') - def __init__(self, model, likelihood, subnetwork_mask=None, sigma_noise=1., prior_precision=1., + def __init__(self, model, likelihood, subnetwork_mask, sigma_noise=1., prior_precision=1., prior_mean=0., temperature=1., backend=BackPackGGN, backend_kwargs=None, subnetmask_kwargs=None): super().__init__(model, likelihood, sigma_noise=sigma_noise, prior_precision=prior_precision, prior_mean=prior_mean, temperature=temperature, backend=backend, diff --git a/laplace/subnetmask.py b/laplace/subnetmask.py index 468734d4..45bb2c3e 100644 --- a/laplace/subnetmask.py +++ b/laplace/subnetmask.py @@ -189,7 +189,7 @@ def __init__(self, model, last_layer_name=None): def n_params_subnet(self): if self._n_params_subnet is None: self._check_select() - self._n_params_subnet = torch.count_nonzero(self._indices).item() + self._n_params_subnet = len(self._indices) return self._n_params_subnet def get_subnet_mask(self, train_loader): From c47c7d078af7451fa382940cc961056fab1c1b41 Mon Sep 17 00:00:00 2001 From: "Erik A. Daxberger" Date: Fri, 10 Dec 2021 20:43:14 +0000 Subject: [PATCH 14/49] Add tests for SubnetLaplace and SubnetMasks --- tests/test_subnetlaplace.py | 205 ++++++++++++++++++++++++++++++++++++ 1 file changed, 205 insertions(+) create mode 100644 tests/test_subnetlaplace.py diff --git a/tests/test_subnetlaplace.py b/tests/test_subnetlaplace.py new file mode 100644 index 00000000..83162cfc --- /dev/null +++ b/tests/test_subnetlaplace.py @@ -0,0 +1,205 @@ +import pytest +from itertools import product + +import torch +from torch import nn +from torch.nn.utils import parameters_to_vector +from torch.utils.data import DataLoader, TensorDataset + +from laplace import Laplace, SubnetLaplace +from laplace.subnetmask import SubnetMask, RandomSubnetMask, LargestMagnitudeSubnetMask, LastLayerSubnetMask, LargestVarianceDiagLaplaceSubnetMask + + +torch.manual_seed(240) +torch.set_default_tensor_type(torch.DoubleTensor) +score_based_subnet_masks = [RandomSubnetMask, LargestMagnitudeSubnetMask, LargestVarianceDiagLaplaceSubnetMask] +likelihoods = ['classification', 'regression'] + + +@pytest.fixture +def model(): + model = torch.nn.Sequential(nn.Linear(3, 20), nn.Linear(20, 2)) + model_params = list(model.parameters()) + setattr(model, 'n_params', len(parameters_to_vector(model_params))) + return model + + +@pytest.fixture +def class_loader(): + X = torch.randn(10, 3) + y = torch.randint(2, (10,)) + return DataLoader(TensorDataset(X, y), batch_size=3) + + +@pytest.fixture +def reg_loader(): + X = torch.randn(10, 3) + y = torch.randn(10, 2) + return DataLoader(TensorDataset(X, y), batch_size=3) + + +@pytest.mark.parametrize('likelihood', likelihoods) +def test_subnet_laplace_init(model, likelihood): + # use last-layer subnet mask for this test + subnetwork_mask = LastLayerSubnetMask + + # subnet Laplace with full Hessian should work + hessian_structure = 'full' + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, 
hessian_structure=hessian_structure) + assert isinstance(lap, SubnetLaplace) + + # subnet Laplace with diag, kron or lowrank Hessians should raise errors + hessian_structure = 'diag' + with pytest.raises(ValueError): + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure=hessian_structure) + hessian_structure = 'kron' + with pytest.raises(ValueError): + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure=hessian_structure) + hessian_structure = 'lowrank' + with pytest.raises(ValueError): + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure=hessian_structure) + + +@pytest.mark.parametrize('subnetwork_mask,likelihood', product(score_based_subnet_masks, likelihoods)) +def test_score_based_subnet_masks(model, likelihood, subnetwork_mask, class_loader, reg_loader): + loader = class_loader if likelihood == 'classification' else reg_loader + + # should raise error if we don't pass number of subnet parameters within the subnetmask_kwargs + subnetmask_kwargs = dict() + with pytest.raises(TypeError): + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) + + # should raise error if we set number of subnet parameters to None + subnetmask_kwargs = dict(n_params_subnet=None) + with pytest.raises(ValueError): + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) + + # should raise error if we set number of subnet parameters to be larger than number of model parameters + subnetmask_kwargs = dict(n_params_subnet=99999) + with pytest.raises(ValueError): + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) + + # define valid subnet Laplace model + n_params_subnet = 32 + subnetmask_kwargs = dict(n_params_subnet=n_params_subnet) + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) + assert isinstance(lap, SubnetLaplace) + assert isinstance(lap.subnetwork_mask, subnetwork_mask) + + # should raise error if we try to access the subnet indices before the subnet has been selected + with pytest.raises(AttributeError): + lap.subnetwork_mask.indices + + # select subnet mask + lap.subnetwork_mask.select(loader) + + # should raise error if we try to select the subnet again + with pytest.raises(ValueError): + lap.subnetwork_mask.select(loader) + + # re-define valid subnet Laplace model + n_params_subnet = 32 + subnetmask_kwargs = dict(n_params_subnet=n_params_subnet) + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) + assert isinstance(lap, SubnetLaplace) + assert isinstance(lap.subnetwork_mask, subnetwork_mask) + + # fit Laplace model (which internally selects the subnet mask) + lap.fit(loader) + + # check some parameters + assert lap.subnetwork_mask.indices.equal(lap.backend.subnetwork_indices) + assert lap.subnetwork_mask.n_params_subnet == n_params_subnet + assert lap.n_params_subnet == n_params_subnet 
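All score-based masks exercised by this test share the top-k selection implemented in `ScoreBasedSubnetMask.get_subnet_mask`; a standalone sketch of that mechanism with made-up scores:

    import torch

    param_scores = torch.tensor([0.1, 0.9, 0.4, 0.7])
    n_params_subnet = 2
    idx = torch.argsort(param_scores, descending=True)[:n_params_subnet]
    idx = idx.sort()[0]  # keep the selected indices in ascending order
    subnet_mask = torch.zeros_like(param_scores).byte()
    subnet_mask[idx] = 1
    print(subnet_mask)  # tensor([0, 1, 0, 1], dtype=torch.uint8)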
+
+	# check that Hessian and prior precision is of correct shape
+	assert lap.H.shape == (n_params_subnet, n_params_subnet)
+	assert lap.prior_precision_diag.shape == (n_params_subnet,)
+
+	# should raise error if we try to fit the Laplace model again
+	with pytest.raises(ValueError):
+		lap.fit(loader)
+
+
+@pytest.mark.parametrize('likelihood', likelihoods)
+def test_last_layer_subnet_mask(model, likelihood, class_loader, reg_loader):
+	subnetwork_mask = LastLayerSubnetMask
+	loader = class_loader if likelihood == 'classification' else reg_loader
+
+	# should raise error if we pass number of subnet parameters
+	subnetmask_kwargs = dict(n_params_subnet=32)
+	with pytest.raises(TypeError):
+		lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs)
+
+	# should raise error if we pass invalid last-layer name
+	subnetmask_kwargs = dict(last_layer_name='123')
+	with pytest.raises(KeyError):
+		lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs)
+
+	# define valid last-layer subnet Laplace model (without passing the last-layer name)
+	subnetmask_kwargs = dict()
+	lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs)
+	assert isinstance(lap, SubnetLaplace)
+	assert isinstance(lap.subnetwork_mask, subnetwork_mask)
+
+	# define valid last-layer subnet Laplace model (with passing the last-layer name)
+	subnetmask_kwargs = dict(last_layer_name='1')
+	lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs)
+	assert isinstance(lap, SubnetLaplace)
+	assert isinstance(lap.subnetwork_mask, subnetwork_mask)
+
+	# should raise error if we access number of subnet parameters before selecting the subnet
+	with pytest.raises(AttributeError):
+		n_params_subnet = lap.subnetwork_mask.n_params_subnet
+
+	# fit Laplace model
+	lap.fit(loader)
+
+	# check some parameters
+	n_params_subnet = 42
+	assert lap.subnetwork_mask.indices.equal(lap.backend.subnetwork_indices)
+	assert lap.subnetwork_mask.n_params_subnet == n_params_subnet
+	assert lap.n_params_subnet == n_params_subnet
+
+	# check that Hessian and prior precision is of correct shape
+	assert lap.H.shape == (n_params_subnet, n_params_subnet)
+	assert lap.prior_precision_diag.shape == (n_params_subnet,)
+
+	# check that Hessian is identical to that of a full LLLaplace model
+	lllap = Laplace(model, likelihood=likelihood, subset_of_weights='last_layer', hessian_structure='full')
+	lllap.fit(loader)
+	assert lllap.H.equal(lap.H)
+
+
+@pytest.mark.parametrize('likelihood', likelihoods)
+def test_full_subnet_mask(model, likelihood, class_loader, reg_loader):
+	loader = class_loader if likelihood == 'classification' else reg_loader
+
+	# define full model 'subnet' mask class (i.e.
where all parameters are part of the subnet)
+	class FullSubnetMask(SubnetMask):
+		@property
+		def n_params_subnet(self):
+			if self._n_params_subnet is None:
+				self._check_select()
+				self._n_params_subnet = len(self._indices)
+			return self._n_params_subnet
+
+		def get_subnet_mask(self, train_loader):
+			return torch.ones(model.n_params).byte()
+
+	# define and fit valid full subnet Laplace model
+	subnetwork_mask = FullSubnetMask
+	lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full')
+	lap.fit(loader)
+	assert isinstance(lap, SubnetLaplace)
+	assert isinstance(lap.subnetwork_mask, subnetwork_mask)
+
+	# check some parameters
+	assert lap.subnetwork_mask.indices.equal(torch.tensor(list(range(model.n_params))))
+	assert lap.subnetwork_mask.n_params_subnet == model.n_params
+	assert lap.n_params_subnet == model.n_params
+
+	# check that the Hessian is identical to that of an all-weights FullLaplace model
+	full_lap = Laplace(model, likelihood=likelihood, subset_of_weights='all', hessian_structure='full')
+	full_lap.fit(loader)
+	assert full_lap.H.equal(lap.H)

From da23af97263199d8fe2ab23a1db92e51f5cee550 Mon Sep 17 00:00:00 2001
From: "Erik A. Daxberger"
Date: Fri, 10 Dec 2021 20:44:23 +0000
Subject: [PATCH 15/49] Change indentation to spaces in test_subnetlaplace.py

---
 tests/test_subnetlaplace.py | 278 ++++++++++++++++++------------------
 1 file changed, 139 insertions(+), 139 deletions(-)

diff --git a/tests/test_subnetlaplace.py b/tests/test_subnetlaplace.py
index 83162cfc..875d9ca8 100644
--- a/tests/test_subnetlaplace.py
+++ b/tests/test_subnetlaplace.py
@@ -40,166 +40,166 @@ def reg_loader():
 @pytest.mark.parametrize('likelihood', likelihoods)
 def test_subnet_laplace_init(model, likelihood):
-	# use last-layer subnet mask for this test
-	subnetwork_mask = LastLayerSubnetMask
-
-	# subnet Laplace with full Hessian should work
-	hessian_structure = 'full'
-	lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure=hessian_structure)
-	assert isinstance(lap, SubnetLaplace)
-
-	# subnet Laplace with diag, kron or lowrank Hessians should raise errors
-	hessian_structure = 'diag'
-	with pytest.raises(ValueError):
-		lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure=hessian_structure)
-	hessian_structure = 'kron'
-	with pytest.raises(ValueError):
-		lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure=hessian_structure)
-	hessian_structure = 'lowrank'
-	with pytest.raises(ValueError):
-		lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure=hessian_structure)
+    # use last-layer subnet mask for this test
+    subnetwork_mask = LastLayerSubnetMask
+
+    # subnet Laplace with full Hessian should work
+    hessian_structure = 'full'
+    lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure=hessian_structure)
+    assert isinstance(lap, SubnetLaplace)
+
+    # subnet Laplace with diag, kron or lowrank Hessians should raise errors
+    hessian_structure = 'diag'
+    with pytest.raises(ValueError):
+        lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure=hessian_structure)
+    hessian_structure = 'kron'
+    with
pytest.raises(ValueError): + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure=hessian_structure) + hessian_structure = 'lowrank' + with pytest.raises(ValueError): + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure=hessian_structure) @pytest.mark.parametrize('subnetwork_mask,likelihood', product(score_based_subnet_masks, likelihoods)) def test_score_based_subnet_masks(model, likelihood, subnetwork_mask, class_loader, reg_loader): - loader = class_loader if likelihood == 'classification' else reg_loader + loader = class_loader if likelihood == 'classification' else reg_loader - # should raise error if we don't pass number of subnet parameters within the subnetmask_kwargs - subnetmask_kwargs = dict() - with pytest.raises(TypeError): - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) + # should raise error if we don't pass number of subnet parameters within the subnetmask_kwargs + subnetmask_kwargs = dict() + with pytest.raises(TypeError): + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) - # should raise error if we set number of subnet parameters to None - subnetmask_kwargs = dict(n_params_subnet=None) - with pytest.raises(ValueError): - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) + # should raise error if we set number of subnet parameters to None + subnetmask_kwargs = dict(n_params_subnet=None) + with pytest.raises(ValueError): + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) - # should raise error if we set number of subnet parameters to be larger than number of model parameters - subnetmask_kwargs = dict(n_params_subnet=99999) - with pytest.raises(ValueError): - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) + # should raise error if we set number of subnet parameters to be larger than number of model parameters + subnetmask_kwargs = dict(n_params_subnet=99999) + with pytest.raises(ValueError): + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) - # define valid subnet Laplace model - n_params_subnet = 32 - subnetmask_kwargs = dict(n_params_subnet=n_params_subnet) - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) - assert isinstance(lap, SubnetLaplace) - assert isinstance(lap.subnetwork_mask, subnetwork_mask) + # define valid subnet Laplace model + n_params_subnet = 32 + subnetmask_kwargs = dict(n_params_subnet=n_params_subnet) + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) + assert isinstance(lap, SubnetLaplace) + assert isinstance(lap.subnetwork_mask, 
subnetwork_mask)
 
-	# should raise error if we try to access the subnet indices before the subnet has been selected
-	with pytest.raises(AttributeError):
-		lap.subnetwork_mask.indices
+    # should raise error if we try to access the subnet indices before the subnet has been selected
+    with pytest.raises(AttributeError):
+        lap.subnetwork_mask.indices
 
-	# select subnet mask
-	lap.subnetwork_mask.select(loader)
+    # select subnet mask
+    lap.subnetwork_mask.select(loader)
 
-	# should raise error if we try to select the subnet again
-	with pytest.raises(ValueError):
-		lap.subnetwork_mask.select(loader)
+    # should raise error if we try to select the subnet again
+    with pytest.raises(ValueError):
+        lap.subnetwork_mask.select(loader)
 
-	# re-define valid subnet Laplace model
-	n_params_subnet = 32
-	subnetmask_kwargs = dict(n_params_subnet=n_params_subnet)
-	lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs)
-	assert isinstance(lap, SubnetLaplace)
-	assert isinstance(lap.subnetwork_mask, subnetwork_mask)
+    # re-define valid subnet Laplace model
+    n_params_subnet = 32
+    subnetmask_kwargs = dict(n_params_subnet=n_params_subnet)
+    lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs)
+    assert isinstance(lap, SubnetLaplace)
+    assert isinstance(lap.subnetwork_mask, subnetwork_mask)
 
-	# fit Laplace model (which internally selects the subnet mask)
-	lap.fit(loader)
+    # fit Laplace model (which internally selects the subnet mask)
+    lap.fit(loader)
 
-	# check some parameters
-	assert lap.subnetwork_mask.indices.equal(lap.backend.subnetwork_indices)
-	assert lap.subnetwork_mask.n_params_subnet == n_params_subnet
-	assert lap.n_params_subnet == n_params_subnet
+    # check some parameters
+    assert lap.subnetwork_mask.indices.equal(lap.backend.subnetwork_indices)
+    assert lap.subnetwork_mask.n_params_subnet == n_params_subnet
+    assert lap.n_params_subnet == n_params_subnet
 
-	# check that Hessian and prior precision is of correct shape
-	assert lap.H.shape == (n_params_subnet, n_params_subnet)
-	assert lap.prior_precision_diag.shape == (n_params_subnet,)
+    # check that Hessian and prior precision is of correct shape
+    assert lap.H.shape == (n_params_subnet, n_params_subnet)
+    assert lap.prior_precision_diag.shape == (n_params_subnet,)
 
-	# should raise error if we try to fit the Laplace model again
-	with pytest.raises(ValueError):
-		lap.fit(loader)
+    # should raise error if we try to fit the Laplace model again
+    with pytest.raises(ValueError):
+        lap.fit(loader)
 
 
 @pytest.mark.parametrize('likelihood', likelihoods)
 def test_last_layer_subnet_mask(model, likelihood, class_loader, reg_loader):
-	subnetwork_mask = LastLayerSubnetMask
-	loader = class_loader if likelihood == 'classification' else reg_loader
-
-	# should raise error if we pass number of subnet parameters
-	subnetmask_kwargs = dict(n_params_subnet=32)
-	with pytest.raises(TypeError):
-		lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs)
-
-	# should raise error if we pass invalid last-layer name
-	subnetmask_kwargs = dict(last_layer_name='123')
-	with pytest.raises(KeyError):
-		lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask,
hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) - - # define valid last-layer subnet Laplace model (without passing the last-layer name) - subnetmask_kwargs = dict() - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) - assert isinstance(lap, SubnetLaplace) - assert isinstance(lap.subnetwork_mask, subnetwork_mask) - - # define valid last-layer subnet Laplace model (with passing the last-layer name) - subnetmask_kwargs = dict(last_layer_name='1') - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) - assert isinstance(lap, SubnetLaplace) - assert isinstance(lap.subnetwork_mask, subnetwork_mask) - - # should raise error if we access number of subnet parameters before selecting the subnet - with pytest.raises(AttributeError): - n_params_subnet = lap.subnetwork_mask.n_params_subnet - - # fit Laplace model - lap.fit(loader) - - # check some parameters - n_params_subnet = 42 - assert lap.subnetwork_mask.indices.equal(lap.backend.subnetwork_indices) - assert lap.subnetwork_mask.n_params_subnet == n_params_subnet - assert lap.n_params_subnet == n_params_subnet - - # check that Hessian and prior precision is of correct shape - assert lap.H.shape == (n_params_subnet, n_params_subnet) - assert lap.prior_precision_diag.shape == (n_params_subnet,) - - # check that Hessian is identical to that of a full LLLaplace model - lllap = Laplace(model, likelihood=likelihood, subset_of_weights='last_layer', hessian_structure='full') - lllap.fit(loader) - assert lllap.H.equal(lap.H) + subnetwork_mask = LastLayerSubnetMask + loader = class_loader if likelihood == 'classification' else reg_loader + + # should raise error if we pass number of subnet parameters + subnetmask_kwargs = dict(n_params_subnet=32) + with pytest.raises(TypeError): + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) + + # should raise error if we pass invalid last-layer name + subnetmask_kwargs = dict(last_layer_name='123') + with pytest.raises(KeyError): + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) + + # define valid last-layer subnet Laplace model (without passing the last-layer name) + subnetmask_kwargs = dict() + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) + assert isinstance(lap, SubnetLaplace) + assert isinstance(lap.subnetwork_mask, subnetwork_mask) + + # define valid last-layer subnet Laplace model (with passing the last-layer name) + subnetmask_kwargs = dict(last_layer_name='1') + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) + assert isinstance(lap, SubnetLaplace) + assert isinstance(lap.subnetwork_mask, subnetwork_mask) + + # should raise error if we access number of subnet parameters before selecting the subnet + with pytest.raises(AttributeError): + n_params_subnet = lap.subnetwork_mask.n_params_subnet + + # fit Laplace model + lap.fit(loader) + + # check some parameters + 
n_params_subnet = 42
+    assert lap.subnetwork_mask.indices.equal(lap.backend.subnetwork_indices)
+    assert lap.subnetwork_mask.n_params_subnet == n_params_subnet
+    assert lap.n_params_subnet == n_params_subnet
+
+    # check that Hessian and prior precision is of correct shape
+    assert lap.H.shape == (n_params_subnet, n_params_subnet)
+    assert lap.prior_precision_diag.shape == (n_params_subnet,)
+
+    # check that Hessian is identical to that of a full LLLaplace model
+    lllap = Laplace(model, likelihood=likelihood, subset_of_weights='last_layer', hessian_structure='full')
+    lllap.fit(loader)
+    assert lllap.H.equal(lap.H)
 
 
 @pytest.mark.parametrize('likelihood', likelihoods)
 def test_full_subnet_mask(model, likelihood, class_loader, reg_loader):
-	loader = class_loader if likelihood == 'classification' else reg_loader
-
-	# define full model 'subnet' mask class (i.e. where all parameters are part of the subnet)
-	class FullSubnetMask(SubnetMask):
-		@property
-		def n_params_subnet(self):
-			if self._n_params_subnet is None:
-				self._check_select()
-				self._n_params_subnet = len(self._indices)
-			return self._n_params_subnet
-
-		def get_subnet_mask(self, train_loader):
-			return torch.ones(model.n_params).byte()
-
-	# define and fit valid full subnet Laplace model
-	subnetwork_mask = FullSubnetMask
-	lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full')
-	lap.fit(loader)
-	assert isinstance(lap, SubnetLaplace)
-	assert isinstance(lap.subnetwork_mask, subnetwork_mask)
-
-	# check some parameters
-	assert lap.subnetwork_mask.indices.equal(torch.tensor(list(range(model.n_params))))
-	assert lap.subnetwork_mask.n_params_subnet == model.n_params
-	assert lap.n_params_subnet == model.n_params
-
-	# check that the Hessian is identical to that of an all-weights FullLaplace model
-	full_lap = Laplace(model, likelihood=likelihood, subset_of_weights='all', hessian_structure='full')
-	full_lap.fit(loader)
-	assert full_lap.H.equal(lap.H)
+    loader = class_loader if likelihood == 'classification' else reg_loader
+
+    # define full model 'subnet' mask class (i.e. where all parameters are part of the subnet)
+    class FullSubnetMask(SubnetMask):
+        @property
+        def n_params_subnet(self):
+            if self._n_params_subnet is None:
+                self._check_select()
+                self._n_params_subnet = len(self._indices)
+            return self._n_params_subnet
+
+        def get_subnet_mask(self, train_loader):
+            return torch.ones(model.n_params).byte()
+
+    # define and fit valid full subnet Laplace model
+    subnetwork_mask = FullSubnetMask
+    lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full')
+    lap.fit(loader)
+    assert isinstance(lap, SubnetLaplace)
+    assert isinstance(lap.subnetwork_mask, subnetwork_mask)
+
+    # check some parameters
+    assert lap.subnetwork_mask.indices.equal(torch.tensor(list(range(model.n_params))))
+    assert lap.subnetwork_mask.n_params_subnet == model.n_params
+    assert lap.n_params_subnet == model.n_params
+
+    # check that the Hessian is identical to that of an all-weights FullLaplace model
+    full_lap = Laplace(model, likelihood=likelihood, subset_of_weights='all', hessian_structure='full')
+    full_lap.fit(loader)
+    assert full_lap.H.equal(lap.H)

From ddf840cc127fc1fb32c8e9519c093fc8517222d6 Mon Sep 17 00:00:00 2001
From: "Erik A. Daxberger"
Date: Wed, 15 Dec 2021 17:11:55 +0000
Subject: [PATCH 16/49] Implement sample() method for SubnetLaplace (as e.g.
required for the NN predictive) --- laplace/subnetlaplace.py | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/laplace/subnetlaplace.py b/laplace/subnetlaplace.py index 67c6eb0e..cce841fa 100644 --- a/laplace/subnetlaplace.py +++ b/laplace/subnetlaplace.py @@ -1,4 +1,5 @@ import torch +from torch.distributions import MultivariateNormal from laplace.baselaplace import FullLaplace, DiagLaplace @@ -75,7 +76,7 @@ def __init__(self, model, likelihood, subnetwork_mask, sigma_noise=1., prior_pre # instantiate and pass diagonal Laplace model for largest variance subnetwork selection self._subnetmask_kwargs.update(diag_laplace_model=DiagLaplace(self.model, likelihood, sigma_noise, prior_precision, prior_mean, temperature, backend, backend_kwargs)) - self.subnetwork_mask = subnetwork_mask(self.model, **self._subnetmask_kwargs) + self._subnetwork_mask = subnetwork_mask(self.model, **self._subnetmask_kwargs) self.n_params_subnet = None def _init_H(self): @@ -110,9 +111,20 @@ def fit(self, train_loader): """ # select subnetwork and pass it to backend - self.subnetwork_mask.select(train_loader) - self.backend.subnetwork_indices = self.subnetwork_mask.indices - self.n_params_subnet = self.subnetwork_mask.n_params_subnet + self._subnetwork_mask.select(train_loader) + self.backend.subnetwork_indices = self._subnetwork_mask.indices + self.n_params_subnet = self._subnetwork_mask.n_params_subnet # fit Laplace approximation over subnetwork super().fit(train_loader) + + def sample(self, n_samples=100): + # sample parameters just of the subnetwork + subnet_mean = self.mean[self._subnetwork_mask.indices] + dist = MultivariateNormal(loc=subnet_mean, scale_tril=self.posterior_scale) + subnet_samples = dist.sample((n_samples,)) + + # set all other parameters to their MAP estimates + full_samples = self.mean.repeat(n_samples, 1) + full_samples[:, self._subnetwork_mask.indices] = subnet_samples + return full_samples From 7a4848982b1946af1b5be5050d26864f614dcc24 Mon Sep 17 00:00:00 2001 From: "Erik A. 
Daxberger" Date: Wed, 15 Dec 2021 17:12:12 +0000 Subject: [PATCH 17/49] Add tests for SubnetLaplace predictives --- tests/test_subnetlaplace.py | 91 +++++++++++++++++++++++++++++++------ 1 file changed, 76 insertions(+), 15 deletions(-) diff --git a/tests/test_subnetlaplace.py b/tests/test_subnetlaplace.py index 875d9ca8..8ce14e6f 100644 --- a/tests/test_subnetlaplace.py +++ b/tests/test_subnetlaplace.py @@ -13,6 +13,7 @@ torch.manual_seed(240) torch.set_default_tensor_type(torch.DoubleTensor) score_based_subnet_masks = [RandomSubnetMask, LargestMagnitudeSubnetMask, LargestVarianceDiagLaplaceSubnetMask] +all_subnet_masks = score_based_subnet_masks + [LastLayerSubnetMask] likelihoods = ['classification', 'regression'] @@ -84,32 +85,32 @@ def test_score_based_subnet_masks(model, likelihood, subnetwork_mask, class_load subnetmask_kwargs = dict(n_params_subnet=n_params_subnet) lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) assert isinstance(lap, SubnetLaplace) - assert isinstance(lap.subnetwork_mask, subnetwork_mask) + assert isinstance(lap._subnetwork_mask, subnetwork_mask) # should raise error if we try to access the subnet indices before the subnet has been selected with pytest.raises(AttributeError): - lap.subnetwork_mask.indices + lap._subnetwork_mask.indices # select subnet mask - lap.subnetwork_mask.select(loader) + lap._subnetwork_mask.select(loader) # should raise error if we try to select the subnet again with pytest.raises(ValueError): - lap.subnetwork_mask.select(loader) + lap._subnetwork_mask.select(loader) # re-define valid subnet Laplace model n_params_subnet = 32 subnetmask_kwargs = dict(n_params_subnet=n_params_subnet) lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) assert isinstance(lap, SubnetLaplace) - assert isinstance(lap.subnetwork_mask, subnetwork_mask) + assert isinstance(lap._subnetwork_mask, subnetwork_mask) # fit Laplace model (which internally selects the subnet mask) lap.fit(loader) # check some parameters - assert lap.subnetwork_mask.indices.equal(lap.backend.subnetwork_indices) - assert lap.subnetwork_mask.n_params_subnet == n_params_subnet + assert lap._subnetwork_mask.indices.equal(lap.backend.subnetwork_indices) + assert lap._subnetwork_mask.n_params_subnet == n_params_subnet assert lap.n_params_subnet == n_params_subnet # check that Hessian and prior precision is of correct shape @@ -140,25 +141,25 @@ def test_last_layer_subnet_mask(model, likelihood, class_loader, reg_loader): subnetmask_kwargs = dict() lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) assert isinstance(lap, SubnetLaplace) - assert isinstance(lap.subnetwork_mask, subnetwork_mask) + assert isinstance(lap._subnetwork_mask, subnetwork_mask) # define valid last-layer subnet Laplace model (with passing the last-layer name) subnetmask_kwargs = dict(last_layer_name='1') lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) assert isinstance(lap, SubnetLaplace) - assert isinstance(lap.subnetwork_mask, subnetwork_mask) + assert isinstance(lap._subnetwork_mask, subnetwork_mask) # should raise error if we access number of 
subnet parameters before selecting the subnet with pytest.raises(AttributeError): - n_params_subnet = lap.subnetwork_mask.n_params_subnet + n_params_subnet = lap._subnetwork_mask.n_params_subnet # fit Laplace model lap.fit(loader) # check some parameters n_params_subnet = 42 - assert lap.subnetwork_mask.indices.equal(lap.backend.subnetwork_indices) - assert lap.subnetwork_mask.n_params_subnet == n_params_subnet + assert lap._subnetwork_mask.indices.equal(lap.backend.subnetwork_indices) + assert lap._subnetwork_mask.n_params_subnet == n_params_subnet assert lap.n_params_subnet == n_params_subnet # check that Hessian and prior precision is of correct shape @@ -192,14 +193,74 @@ def get_subnet_mask(self, train_loader): lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full') lap.fit(loader) assert isinstance(lap, SubnetLaplace) - assert isinstance(lap.subnetwork_mask, subnetwork_mask) + assert isinstance(lap._subnetwork_mask, subnetwork_mask) # check some parameters - assert lap.subnetwork_mask.indices.equal(torch.tensor(list(range(model.n_params)))) - assert lap.subnetwork_mask.n_params_subnet == model.n_params + assert lap._subnetwork_mask.indices.equal(torch.tensor(list(range(model.n_params)))) + assert lap._subnetwork_mask.n_params_subnet == model.n_params assert lap.n_params_subnet == model.n_params # check that the Hessian is identical to that of a all-weights FullLaplace model full_lap = Laplace(model, likelihood=likelihood, subset_of_weights='all', hessian_structure='full') full_lap.fit(loader) assert full_lap.H.equal(lap.H) + + +@pytest.mark.parametrize('subnetwork_mask', all_subnet_masks) +def test_regression_predictive(model, reg_loader, subnetwork_mask): + subnetmask_kwargs = dict(n_params_subnet=32) if subnetwork_mask in score_based_subnet_masks else dict() + lap = Laplace(model, likelihood='regression', subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) + assert isinstance(lap, SubnetLaplace) + assert isinstance(lap._subnetwork_mask, subnetwork_mask) + + lap.fit(reg_loader) + X, _ = reg_loader.dataset.tensors + f = model(X) + + # error + with pytest.raises(ValueError): + lap(X, pred_type='linear') + + # GLM predictive + f_mu, f_var = lap(X, pred_type='glm') + assert torch.allclose(f_mu, f) + assert f_var.shape == torch.Size([f_mu.shape[0], f_mu.shape[1], f_mu.shape[1]]) + assert len(f_mu) == len(X) + + # NN predictive (only diagonal variance estimation) + f_mu, f_var = lap(X, pred_type='nn') + assert f_mu.shape == f_var.shape + assert f_var.shape == torch.Size([f_mu.shape[0], f_mu.shape[1]]) + assert len(f_mu) == len(X) + + +@pytest.mark.parametrize('subnetwork_mask', all_subnet_masks) +def test_classification_predictive(model, class_loader, subnetwork_mask): + subnetmask_kwargs = dict(n_params_subnet=32) if subnetwork_mask in score_based_subnet_masks else dict() + lap = Laplace(model, likelihood='classification', subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) + assert isinstance(lap, SubnetLaplace) + assert isinstance(lap._subnetwork_mask, subnetwork_mask) + + lap.fit(class_loader) + X, _ = class_loader.dataset.tensors + f = torch.softmax(model(X), dim=-1) + + # error + with pytest.raises(ValueError): + lap(X, pred_type='linear') + + # GLM predictive + f_pred = lap(X, pred_type='glm', link_approx='mc', n_samples=100) + assert f_pred.shape == 
f.shape + assert torch.allclose(f_pred.sum(), torch.tensor(len(f_pred), dtype=torch.double)) # sum up to 1 + f_pred = lap(X, pred_type='glm', link_approx='probit') + assert f_pred.shape == f.shape + assert torch.allclose(f_pred.sum(), torch.tensor(len(f_pred), dtype=torch.double)) # sum up to 1 + f_pred = lap(X, pred_type='glm', link_approx='bridge') + assert f_pred.shape == f.shape + assert torch.allclose(f_pred.sum(), torch.tensor(len(f_pred), dtype=torch.double)) # sum up to 1 + + # NN predictive + f_pred = lap(X, pred_type='nn', n_samples=100) + assert f_pred.shape == f.shape + assert torch.allclose(f_pred.sum(), torch.tensor(len(f_pred), dtype=torch.double)) # sum up to 1 \ No newline at end of file From bcc9ca7df8983a59357dd6e38ac5fbfe53d4bb44 Mon Sep 17 00:00:00 2001 From: "Erik A. Daxberger" Date: Thu, 16 Dec 2021 13:36:36 +0000 Subject: [PATCH 18/49] Fix small bug in LastLayerSubnetMask --- laplace/subnetmask.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/laplace/subnetmask.py b/laplace/subnetmask.py index 45bb2c3e..44d5c2fc 100644 --- a/laplace/subnetmask.py +++ b/laplace/subnetmask.py @@ -101,7 +101,7 @@ class ScoreBasedSubnetMask(SubnetMask): ---------- model : torch.nn.Module n_params_subnet : int - the number of parameters in the subnetwork (i.e. the number of top-scoring parameters to select) + number of parameters in the subnetwork (i.e. number of top-scoring parameters to select) """ def __init__(self, model, n_params_subnet): super().__init__(model) @@ -152,13 +152,13 @@ def compute_param_scores(self, train_loader): class LargestVarianceDiagLaplaceSubnetMask(ScoreBasedSubnetMask): """Subnetwork mask identifying the parameters with the largest marginal variances - (estimated using a diagional Laplace approximation over all model parameters). + (estimated using a diagonal Laplace approximation over all model parameters). Parameters ---------- model : torch.nn.Module n_params_subnet : int - the number of parameters in the subnetwork (i.e. the number of top-scoring parameters to select) + number of parameters in the subnetwork (i.e. number of top-scoring parameters to select) diag_laplace_model : `laplace.baselaplace.DiagLaplace` diagonal Laplace model to use for variance estimation """ @@ -203,7 +203,7 @@ def get_subnet_mask(self, train_loader): subnet_mask_list = [] for name, layer in self.model.model.named_modules(): - if len(list(layer.children())) > 0: + if len(list(layer.children())) > 0 or len(list(layer.parameters())) == 0: continue if name == self.model._last_layer_name: mask_method = torch.ones_like From 381f79d44e3d48ad997e001531d9fd8348f02b01 Mon Sep 17 00:00:00 2001 From: "Erik A. Daxberger" Date: Thu, 16 Dec 2021 13:37:22 +0000 Subject: [PATCH 19/49] Add reference to subnetwork inference paper to SubnetLaplace docstring --- laplace/subnetlaplace.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/laplace/subnetlaplace.py b/laplace/subnetlaplace.py index cce841fa..16ed87cb 100644 --- a/laplace/subnetlaplace.py +++ b/laplace/subnetlaplace.py @@ -12,8 +12,8 @@ class SubnetLaplace(FullLaplace): """Class for subnetwork Laplace, which computes the Laplace approximation over - just a subset of the model parameters (i.e. a subnetwork within the neural network). - Subnetwork Laplace only supports a full Hessian approximation; other Hessian + just a subset of the model parameters (i.e. a subnetwork within the neural network), + as proposed in [1]. 
Subnetwork Laplace only supports a full Hessian approximation; other approximations could be used in theory, but would not make as much sense conceptually. A Laplace approximation is represented by a MAP which is given by the @@ -38,6 +38,12 @@ class SubnetLaplace(FullLaplace): matrix. Mathematically, we have \\(P \\in \\mathbb{R}^{P \\times P}\\). See `FullLaplace` and `BaseLaplace` for the full interface. + References + ---------- + [1] Daxberger, E., Nalisnick, E., Allingham, JU., Antorán, J., Hernández-Lobato, JM. + [*Bayesian Deep Learning via Subnetwork Inference*](https://arxiv.org/abs/2010.14689). + ICML 2021. + Parameters ---------- model : torch.nn.Module or `laplace.feature_extractor.FeatureExtractor` From f345ac55728ce74b7edcff465b3cd0d380a57b2d Mon Sep 17 00:00:00 2001 From: "Erik A. Daxberger" Date: Mon, 20 Dec 2021 13:50:49 +0000 Subject: [PATCH 20/49] Add SubnetMask that allows specifying subnet parameters or modules by name --- laplace/subnetmask.py | 137 ++++++++++++++++++++------- tests/test_subnetlaplace.py | 179 ++++++++++++++++++++++++++++-------- 2 files changed, 248 insertions(+), 68 deletions(-) diff --git a/laplace/subnetmask.py b/laplace/subnetmask.py index 44d5c2fc..8b0b8d29 100644 --- a/laplace/subnetmask.py +++ b/laplace/subnetmask.py @@ -1,9 +1,11 @@ +from copy import deepcopy + import torch from torch.nn.utils import parameters_to_vector from laplace.feature_extractor import FeatureExtractor -__all__ = ['SubnetMask', 'RandomSubnetMask', 'LargestMagnitudeSubnetMask', 'LastLayerSubnetMask', 'LargestVarianceDiagLaplaceSubnetMask'] +__all__ = ['SubnetMask', 'RandomSubnetMask', 'LargestMagnitudeSubnetMask', 'LargestVarianceDiagLaplaceSubnetMask', 'ParamNameSubnetMask', 'ModuleNameSubnetMask', 'LastLayerSubnetMask'] class SubnetMask: @@ -21,10 +23,6 @@ def __init__(self, model): self._indices = None self._n_params_subnet = None - @property - def n_params_subnet(self): - raise NotImplementedError - def _check_select(self): if self._indices is None: raise AttributeError('Subnetwork mask not selected. Run select() first.') @@ -34,6 +32,13 @@ def indices(self): self._check_select() return self._indices + @property + def n_params_subnet(self): + if self._n_params_subnet is None: + self._check_select() + self._n_params_subnet = len(self._indices) + return self._n_params_subnet + def convert_subnet_mask_to_indices(self, subnet_mask): """Converts a subnetwork mask into subnetwork indices. @@ -113,10 +118,6 @@ def __init__(self, model, n_params_subnet): self._n_params_subnet = n_params_subnet self._param_scores = None - @property - def n_params_subnet(self): - return self._n_params_subnet - def compute_param_scores(self, train_loader): raise NotImplementedError @@ -171,44 +172,118 @@ def compute_param_scores(self, train_loader): return self.diag_laplace_model.posterior_variance -class LastLayerSubnetMask(SubnetMask): - """Subnetwork mask corresponding to the last layer of the neural network. +class ParamNameSubnetMask(SubnetMask): + """Subnetwork mask corresponding to the specified parameters of the neural network. 
Parameters ---------- model : torch.nn.Module - last_layer_name: str, default=None - name of the model's last layer, if None it will be determined automatically + parameter_names: List[str] + list of names of the parameters (as in `model.named_parameters()`) that define the subnetwork """ - def __init__(self, model, last_layer_name=None): + def __init__(self, model, parameter_names): super().__init__(model) - self.model = FeatureExtractor(self.model, last_layer_name=last_layer_name) + self._parameter_names = parameter_names self._n_params_subnet = None - @property - def n_params_subnet(self): - if self._n_params_subnet is None: - self._check_select() - self._n_params_subnet = len(self._indices) - return self._n_params_subnet + def _check_param_names(self): + param_names = deepcopy(self._parameter_names) + if len(param_names) == 0: + raise ValueError(f'Parameter name list cannot be empty.') + + for name, _ in self.model.named_parameters(): + if name in param_names: + param_names.remove(name) + if len(param_names) > 0: + raise ValueError(f'Parameters {param_names} do not exist in model.') def get_subnet_mask(self, train_loader): - """ Get the subnetwork mask identifying the last layer.""" + """ Get the subnetwork mask identifying the specified parameters.""" - self.model.eval() - if self.model.last_layer is None: - X, _ = next(iter(train_loader)) - with torch.no_grad(): - self.model.find_last_layer(X[:1].to(self._device)) + self._check_param_names() subnet_mask_list = [] - for name, layer in self.model.model.named_modules(): - if len(list(layer.children())) > 0 or len(list(layer.parameters())) == 0: + for name, param in self.model.named_parameters(): + if name in self._parameter_names: + mask_method = torch.ones_like + else: + mask_method = torch.zeros_like + subnet_mask_list.append(mask_method(parameters_to_vector(param))) + subnet_mask = torch.cat(subnet_mask_list).byte() + return subnet_mask + + +class ModuleNameSubnetMask(SubnetMask): + """Subnetwork mask corresponding to the specified modules of the neural network. + + Parameters + ---------- + model : torch.nn.Module + parameter_names: List[str] + list of names of the modules (as in `model.named_modules()`) that define the subnetwork; + the modules cannot have children, i.e. 
need to be leaf modules + """ + def __init__(self, model, module_names): + super().__init__(model) + self._module_names = module_names + self._n_params_subnet = None + + def _check_module_names(self): + module_names = deepcopy(self._module_names) + if len(module_names) == 0: + raise ValueError(f'Module name list cannot be empty.') + + for name, module in self.model.named_modules(): + if name in module_names: + if len(list(module.children())) > 0: + raise ValueError(f'Module "{name}" has children, which is not supported.') + elif len(list(module.parameters())) == 0: + raise ValueError(f'Module "{name}" does not have any parameters.') + else: + module_names.remove(name) + if len(module_names) > 0: + raise ValueError(f'Modules {module_names} do not exist in model.') + + def get_subnet_mask(self, train_loader): + """ Get the subnetwork mask identifying the specified modules.""" + + self._check_module_names() + + subnet_mask_list = [] + for name, module in self.model.named_modules(): + if len(list(module.children())) > 0 or len(list(module.parameters())) == 0: continue - if name == self.model._last_layer_name: + if name in self._module_names: mask_method = torch.ones_like else: mask_method = torch.zeros_like - subnet_mask_list.append(mask_method(parameters_to_vector(layer.parameters()))) + subnet_mask_list.append(mask_method(parameters_to_vector(module.parameters()))) subnet_mask = torch.cat(subnet_mask_list).byte() return subnet_mask + + +class LastLayerSubnetMask(ModuleNameSubnetMask): + """Subnetwork mask corresponding to the last layer of the neural network. + + Parameters + ---------- + model : torch.nn.Module + last_layer_name: str, default=None + name of the model's last layer, if None it will be determined automatically + """ + def __init__(self, model, last_layer_name=None): + super().__init__(model, None) + self._feature_extractor = FeatureExtractor(self.model, last_layer_name=last_layer_name) + self._n_params_subnet = None + + def get_subnet_mask(self, train_loader): + """ Get the subnetwork mask identifying the last layer.""" + + self._feature_extractor.eval() + if self._feature_extractor.last_layer is None: + X = next(iter(train_loader))[0] + with torch.no_grad(): + self._feature_extractor.find_last_layer(X[:1].to(self._device)) + self._module_names = [self._feature_extractor._last_layer_name] + + return super().get_subnet_mask(train_loader) diff --git a/tests/test_subnetlaplace.py b/tests/test_subnetlaplace.py index 8ce14e6f..b6687c96 100644 --- a/tests/test_subnetlaplace.py +++ b/tests/test_subnetlaplace.py @@ -7,13 +7,14 @@ from torch.utils.data import DataLoader, TensorDataset from laplace import Laplace, SubnetLaplace -from laplace.subnetmask import SubnetMask, RandomSubnetMask, LargestMagnitudeSubnetMask, LastLayerSubnetMask, LargestVarianceDiagLaplaceSubnetMask +from laplace.subnetmask import SubnetMask, RandomSubnetMask, LargestMagnitudeSubnetMask, LargestVarianceDiagLaplaceSubnetMask, ParamNameSubnetMask, ModuleNameSubnetMask, LastLayerSubnetMask torch.manual_seed(240) torch.set_default_tensor_type(torch.DoubleTensor) score_based_subnet_masks = [RandomSubnetMask, LargestMagnitudeSubnetMask, LargestVarianceDiagLaplaceSubnetMask] -all_subnet_masks = score_based_subnet_masks + [LastLayerSubnetMask] +layer_subnet_masks = [ParamNameSubnetMask, ModuleNameSubnetMask, LastLayerSubnetMask] +all_subnet_masks = score_based_subnet_masks + layer_subnet_masks likelihoods = ['classification', 'regression'] @@ -64,25 +65,26 @@ def test_subnet_laplace_init(model, likelihood): 
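A hedged usage sketch for the name-based masks added in this patch. The model again matches the test fixture's parameter counts; `select()` is assumed to take the training loader, analogously to `get_subnet_mask()`:

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

from laplace.subnetmask import ParamNameSubnetMask, ModuleNameSubnetMask, LastLayerSubnetMask

model = torch.nn.Sequential(torch.nn.Linear(3, 20), torch.nn.Linear(20, 2))
loader = DataLoader(TensorDataset(torch.randn(10, 3), torch.randn(10, 2)), batch_size=10)

# subnetwork = all of layer 0's weights plus layer 1's bias (60 + 2 = 62 parameters)
mask = ParamNameSubnetMask(model, parameter_names=['0.weight', '1.bias'])
mask.select(loader)           # validates the names and builds the index set
print(mask.n_params_subnet)   # 62
print(mask.indices[:5])       # positions within the flattened parameter vector

# equivalently, whole leaf modules, or the automatically detected last layer
module_mask = ModuleNameSubnetMask(model, module_names=['0'])
last_layer_mask = LastLayerSubnetMask(model)
```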
@pytest.mark.parametrize('subnetwork_mask,likelihood', product(score_based_subnet_masks, likelihoods)) def test_score_based_subnet_masks(model, likelihood, subnetwork_mask, class_loader, reg_loader): loader = class_loader if likelihood == 'classification' else reg_loader + model_params = parameters_to_vector(model.parameters()) + subnetmask_kwargs = dict() # should raise error if we don't pass number of subnet parameters within the subnetmask_kwargs - subnetmask_kwargs = dict() with pytest.raises(TypeError): lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) # should raise error if we set number of subnet parameters to None - subnetmask_kwargs = dict(n_params_subnet=None) + subnetmask_kwargs.update(n_params_subnet=None) with pytest.raises(ValueError): lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) # should raise error if we set number of subnet parameters to be larger than number of model parameters - subnetmask_kwargs = dict(n_params_subnet=99999) + subnetmask_kwargs.update(n_params_subnet=99999) with pytest.raises(ValueError): lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) # define valid subnet Laplace model n_params_subnet = 32 - subnetmask_kwargs = dict(n_params_subnet=n_params_subnet) + subnetmask_kwargs.update(n_params_subnet=n_params_subnet) lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) assert isinstance(lap, SubnetLaplace) assert isinstance(lap._subnetwork_mask, subnetwork_mask) @@ -100,7 +102,7 @@ def test_score_based_subnet_masks(model, likelihood, subnetwork_mask, class_load # re-define valid subnet Laplace model n_params_subnet = 32 - subnetmask_kwargs = dict(n_params_subnet=n_params_subnet) + subnetmask_kwargs.update(n_params_subnet=n_params_subnet) lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) assert isinstance(lap, SubnetLaplace) assert isinstance(lap._subnetwork_mask, subnetwork_mask) @@ -112,6 +114,7 @@ def test_score_based_subnet_masks(model, likelihood, subnetwork_mask, class_load assert lap._subnetwork_mask.indices.equal(lap.backend.subnetwork_indices) assert lap._subnetwork_mask.n_params_subnet == n_params_subnet assert lap.n_params_subnet == n_params_subnet + assert parameters_to_vector(model.parameters()).equal(model_params) # check that Hessian and prior precision is of correct shape assert lap.H.shape == (n_params_subnet, n_params_subnet) @@ -122,42 +125,135 @@ def test_score_based_subnet_masks(model, likelihood, subnetwork_mask, class_load lap.fit(loader) -@pytest.mark.parametrize('likelihood', likelihoods) -def test_last_layer_subnet_mask(model, likelihood, class_loader, reg_loader): - subnetwork_mask = LastLayerSubnetMask +@pytest.mark.parametrize('subnetwork_mask,likelihood', product(layer_subnet_masks, likelihoods)) +def test_layer_subnet_masks(model, likelihood, subnetwork_mask, class_loader, reg_loader): loader = class_loader if likelihood == 'classification' else reg_loader + # fit last-layer Laplace model + lllap = Laplace(model, 
likelihood=likelihood, subset_of_weights='last_layer', hessian_structure='full') + lllap.fit(loader) + # should raise error if we pass number of subnet parameters subnetmask_kwargs = dict(n_params_subnet=32) with pytest.raises(TypeError): lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) - # should raise error if we pass invalid last-layer name - subnetmask_kwargs = dict(last_layer_name='123') - with pytest.raises(KeyError): + if subnetwork_mask == ParamNameSubnetMask: + # should raise error if we pass no parameter name list + subnetmask_kwargs = dict() + with pytest.raises(TypeError): + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) + + # should raise error if we pass an empty parameter name list + subnetmask_kwargs = dict(parameter_names=[]) + with pytest.raises(ValueError): + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) + lap.fit(loader) + + # should raise error if we pass a parameter name list with invalid parameter names + subnetmask_kwargs = dict(parameter_names=['123']) + with pytest.raises(ValueError): + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) + lap.fit(loader) + + # define last-layer Laplace model by parameter names and check that Hessian is identical to that of a full LLLaplace model + subnetmask_kwargs = dict(parameter_names=['1.weight', '1.bias']) lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) + lap.fit(loader) + assert lllap.H.equal(lap.H) - # define valid last-layer subnet Laplace model (without passing the last-layer name) - subnetmask_kwargs = dict() - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) - assert isinstance(lap, SubnetLaplace) - assert isinstance(lap._subnetwork_mask, subnetwork_mask) + # define valid parameter name subnet Laplace model + subnetmask_kwargs = dict(parameter_names=['0.weight', '1.bias']) + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) + n_params_subnet = 62 + assert isinstance(lap, SubnetLaplace) + assert isinstance(lap._subnetwork_mask, subnetwork_mask) - # define valid last-layer subnet Laplace model (with passing the last-layer name) - subnetmask_kwargs = dict(last_layer_name='1') - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) - assert isinstance(lap, SubnetLaplace) - assert isinstance(lap._subnetwork_mask, subnetwork_mask) + # should raise error if we access number of subnet parameters before selecting the subnet + with pytest.raises(AttributeError): + n_params_subnet = lap._subnetwork_mask.n_params_subnet - # should raise error if we access number of subnet parameters before selecting the subnet - with pytest.raises(AttributeError): - n_params_subnet = 
lap._subnetwork_mask.n_params_subnet + # fit Laplace model + lap.fit(loader) - # fit Laplace model - lap.fit(loader) + elif subnetwork_mask == ModuleNameSubnetMask: + # should raise error if we pass no module name list + subnetmask_kwargs = dict() + with pytest.raises(TypeError): + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) + + # should raise error if we pass an empty module name list + subnetmask_kwargs = dict(module_names=[]) + with pytest.raises(ValueError): + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) + lap.fit(loader) + + # should raise error if we pass a module name list with invalid module names + subnetmask_kwargs = dict(module_names=['123']) + with pytest.raises(ValueError): + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) + lap.fit(loader) + + # define last-layer Laplace model by module name and check that Hessian is identical to that of a full LLLaplace model + subnetmask_kwargs = dict(module_names=['1']) + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) + lap.fit(loader) + assert lllap.H.equal(lap.H) + + # define valid parameter name subnet Laplace model + subnetmask_kwargs = dict(module_names=['0']) + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) + n_params_subnet = 80 + assert isinstance(lap, SubnetLaplace) + assert isinstance(lap._subnetwork_mask, subnetwork_mask) + + # should raise error if we access number of subnet parameters before selecting the subnet + with pytest.raises(AttributeError): + n_params_subnet = lap._subnetwork_mask.n_params_subnet + + # fit Laplace model + lap.fit(loader) + + elif subnetwork_mask == LastLayerSubnetMask: + # should raise error if we pass invalid last-layer name + subnetmask_kwargs = dict(last_layer_name='123') + with pytest.raises(KeyError): + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) + + # define valid last-layer subnet Laplace model (without passing the last-layer name) + subnetmask_kwargs = dict() + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) + assert isinstance(lap, SubnetLaplace) + assert isinstance(lap._subnetwork_mask, subnetwork_mask) + + # should raise error if we access number of subnet parameters before selecting the subnet + with pytest.raises(AttributeError): + n_params_subnet = lap._subnetwork_mask.n_params_subnet + + # fit Laplace model + lap.fit(loader) + + # check that Hessian is identical to that of a full LLLaplace model + assert lllap.H.equal(lap.H) + + # define valid last-layer subnet Laplace model (with passing the last-layer name) + subnetmask_kwargs = dict(last_layer_name='1') + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', 
subnetmask_kwargs=subnetmask_kwargs) + n_params_subnet = 42 + assert isinstance(lap, SubnetLaplace) + assert isinstance(lap._subnetwork_mask, subnetwork_mask) + + # should raise error if we access number of subnet parameters before selecting the subnet + with pytest.raises(AttributeError): + n_params_subnet = lap._subnetwork_mask.n_params_subnet + + # fit Laplace model + lap.fit(loader) + + # check that Hessian is identical to that of a full LLLaplace model + assert lllap.H.equal(lap.H) # check some parameters - n_params_subnet = 42 assert lap._subnetwork_mask.indices.equal(lap.backend.subnetwork_indices) assert lap._subnetwork_mask.n_params_subnet == n_params_subnet assert lap.n_params_subnet == n_params_subnet @@ -166,11 +262,6 @@ def test_last_layer_subnet_mask(model, likelihood, class_loader, reg_loader): assert lap.H.shape == (n_params_subnet, n_params_subnet) assert lap.prior_precision_diag.shape == (n_params_subnet,) - # check that Hessian is identical to that of a full LLLaplace model - lllap = Laplace(model, likelihood=likelihood, subset_of_weights='last_layer', hessian_structure='full') - lllap.fit(loader) - assert lllap.H.equal(lap.H) - @pytest.mark.parametrize('likelihood', likelihoods) def test_full_subnet_mask(model, likelihood, class_loader, reg_loader): @@ -208,7 +299,14 @@ def get_subnet_mask(self, train_loader): @pytest.mark.parametrize('subnetwork_mask', all_subnet_masks) def test_regression_predictive(model, reg_loader, subnetwork_mask): - subnetmask_kwargs = dict(n_params_subnet=32) if subnetwork_mask in score_based_subnet_masks else dict() + if subnetwork_mask in score_based_subnet_masks: + subnetmask_kwargs = dict(n_params_subnet=32) + elif subnetwork_mask == ParamNameSubnetMask: + subnetmask_kwargs = dict(parameter_names=['0.weight', '1.bias']) + elif subnetwork_mask == ModuleNameSubnetMask: + subnetmask_kwargs = dict(module_names=['0']) + else: + subnetmask_kwargs = dict() lap = Laplace(model, likelihood='regression', subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) assert isinstance(lap, SubnetLaplace) assert isinstance(lap._subnetwork_mask, subnetwork_mask) @@ -236,7 +334,14 @@ def test_regression_predictive(model, reg_loader, subnetwork_mask): @pytest.mark.parametrize('subnetwork_mask', all_subnet_masks) def test_classification_predictive(model, class_loader, subnetwork_mask): - subnetmask_kwargs = dict(n_params_subnet=32) if subnetwork_mask in score_based_subnet_masks else dict() + if subnetwork_mask in score_based_subnet_masks: + subnetmask_kwargs = dict(n_params_subnet=32) + elif subnetwork_mask == ParamNameSubnetMask: + subnetmask_kwargs = dict(parameter_names=['0.weight', '1.bias']) + elif subnetwork_mask == ModuleNameSubnetMask: + subnetmask_kwargs = dict(module_names=['0']) + else: + subnetmask_kwargs = dict() lap = Laplace(model, likelihood='classification', subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) assert isinstance(lap, SubnetLaplace) assert isinstance(lap._subnetwork_mask, subnetwork_mask) @@ -263,4 +368,4 @@ def test_classification_predictive(model, class_loader, subnetwork_mask): # NN predictive f_pred = lap(X, pred_type='nn', n_samples=100) assert f_pred.shape == f.shape - assert torch.allclose(f_pred.sum(), torch.tensor(len(f_pred), dtype=torch.double)) # sum up to 1 \ No newline at end of file + assert torch.allclose(f_pred.sum(), torch.tensor(len(f_pred), dtype=torch.double)) # sum 
up to 1 From ea16f3467181f5e7dafd1559404cee2a3805224b Mon Sep 17 00:00:00 2001 From: "Erik A. Daxberger" Date: Mon, 20 Dec 2021 17:30:15 +0000 Subject: [PATCH 21/49] Make subnet mask type check independent of CUDA and change default type to bool --- laplace/subnetmask.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/laplace/subnetmask.py b/laplace/subnetmask.py index 8b0b8d29..caa29094 100644 --- a/laplace/subnetmask.py +++ b/laplace/subnetmask.py @@ -55,9 +55,9 @@ def convert_subnet_mask_to_indices(self, subnet_mask): """ if not isinstance(subnet_mask, torch.Tensor): raise ValueError('Subnetwork mask needs to be torch.Tensor!') - elif subnet_mask.type() not in ['torch.ByteTensor', 'torch.IntTensor', 'torch.LongTensor'] or\ + elif subnet_mask.dtype not in [torch.int64, torch.int32, torch.int16, torch.int8, torch.uint8, torch.bool] or\ len(subnet_mask.shape) != 1: - raise ValueError('Subnetwork mask needs to be 1-dimensional torch.{Byte,Int,Long}Tensor!') + raise ValueError('Subnetwork mask needs to be 1-dimensional integral or boolean tensor!') elif len(subnet_mask) != self._n_params or\ len(subnet_mask[subnet_mask == 0]) + len(subnet_mask[subnet_mask == 1]) != self._n_params: raise ValueError('Subnetwork mask needs to be a binary vector of size (n_params) where 1s'\ @@ -134,7 +134,7 @@ def get_subnet_mask(self, train_loader): idx = torch.argsort(self._param_scores, descending=True)[:self._n_params_subnet] idx = idx.sort()[0] - subnet_mask = torch.zeros_like(self.parameter_vector).byte() + subnet_mask = torch.zeros_like(self.parameter_vector).bool() subnet_mask[idx] = 1 return subnet_mask @@ -209,7 +209,7 @@ def get_subnet_mask(self, train_loader): else: mask_method = torch.zeros_like subnet_mask_list.append(mask_method(parameters_to_vector(param))) - subnet_mask = torch.cat(subnet_mask_list).byte() + subnet_mask = torch.cat(subnet_mask_list).bool() return subnet_mask @@ -258,7 +258,7 @@ def get_subnet_mask(self, train_loader): else: mask_method = torch.zeros_like subnet_mask_list.append(mask_method(parameters_to_vector(module.parameters()))) - subnet_mask = torch.cat(subnet_mask_list).byte() + subnet_mask = torch.cat(subnet_mask_list).bool() return subnet_mask From 176d678093fa41aa267d3d522890491571ac3934 Mon Sep 17 00:00:00 2001 From: "Erik A. Daxberger" Date: Tue, 21 Dec 2021 09:36:38 +0000 Subject: [PATCH 22/49] Change LargestMagnitudeSubnetMask to use absolute parameter values --- laplace/subnetmask.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/laplace/subnetmask.py b/laplace/subnetmask.py index caa29094..3030f163 100644 --- a/laplace/subnetmask.py +++ b/laplace/subnetmask.py @@ -148,7 +148,7 @@ def compute_param_scores(self, train_loader): class LargestMagnitudeSubnetMask(ScoreBasedSubnetMask): """Subnetwork mask identifying the parameters with the largest magnitude. """ def compute_param_scores(self, train_loader): - return self.parameter_vector + return self.parameter_vector.abs() class LargestVarianceDiagLaplaceSubnetMask(ScoreBasedSubnetMask): From 59603e8e7b0d1cc6c0cab165162565aead74e196 Mon Sep 17 00:00:00 2001 From: "Erik A. 
Daxberger" Date: Tue, 21 Dec 2021 09:38:34 +0000 Subject: [PATCH 23/49] Small refactoring of SubnetLaplace tests --- tests/test_subnetlaplace.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/tests/test_subnetlaplace.py b/tests/test_subnetlaplace.py index b6687c96..3d15e736 100644 --- a/tests/test_subnetlaplace.py +++ b/tests/test_subnetlaplace.py @@ -269,13 +269,6 @@ def test_full_subnet_mask(model, likelihood, class_loader, reg_loader): # define full model 'subnet' mask class (i.e. where all parameters are part of the subnet) class FullSubnetMask(SubnetMask): - @property - def n_params_subnet(self): - if self._n_params_subnet is None: - self._check_select() - self._n_params_subnet = len(self._indices) - return self._n_params_subnet - def get_subnet_mask(self, train_loader): return torch.ones(model.n_params).byte() From 8410ecaf23477e6e75207ff0fa6fbe1105f1a2f2 Mon Sep 17 00:00:00 2001 From: "Erik A. Daxberger" Date: Tue, 21 Dec 2021 09:43:52 +0000 Subject: [PATCH 24/49] Add implementation of SubnetMask that selects params with largest variance, estimated via diagonal SWAG --- laplace/subnetmask.py | 38 ++++++++++++++++- laplace/swag.py | 82 +++++++++++++++++++++++++++++++++++++ tests/test_subnetlaplace.py | 8 ++-- 3 files changed, 124 insertions(+), 4 deletions(-) create mode 100644 laplace/swag.py diff --git a/laplace/subnetmask.py b/laplace/subnetmask.py index 3030f163..36a135bd 100644 --- a/laplace/subnetmask.py +++ b/laplace/subnetmask.py @@ -1,11 +1,13 @@ from copy import deepcopy import torch +from torch.nn import CrossEntropyLoss, MSELoss from torch.nn.utils import parameters_to_vector from laplace.feature_extractor import FeatureExtractor +from laplace.swag import fit_diagonal_swag -__all__ = ['SubnetMask', 'RandomSubnetMask', 'LargestMagnitudeSubnetMask', 'LargestVarianceDiagLaplaceSubnetMask', 'ParamNameSubnetMask', 'ModuleNameSubnetMask', 'LastLayerSubnetMask'] +__all__ = ['SubnetMask', 'RandomSubnetMask', 'LargestMagnitudeSubnetMask', 'LargestVarianceDiagLaplaceSubnetMask', 'LargestVarianceSWAGSubnetMask', 'ParamNameSubnetMask', 'ModuleNameSubnetMask', 'LastLayerSubnetMask'] class SubnetMask: @@ -172,6 +174,40 @@ def compute_param_scores(self, train_loader): return self.diag_laplace_model.posterior_variance +class LargestVarianceSWAGSubnetMask(ScoreBasedSubnetMask): + """Subnetwork mask identifying the parameters with the largest marginal variances + (estimated using diagonal SWAG over all model parameters). + + Parameters + ---------- + model : torch.nn.Module + n_params_subnet : int + number of parameters in the subnetwork (i.e. 
number of top-scoring parameters to select) + likelihood : str + 'classification' or 'regression' + swag_n_snapshots : int + number of model snapshots to collect for SWAG + swag_snapshot_freq : int + SWAG snapshot collection frequency (in epochs) + swag_lr : float + learning rate for SWAG snapshot collection + """ + def __init__(self, model, n_params_subnet, likelihood='classification', swag_n_snapshots=40, swag_snapshot_freq=1, swag_lr=0.01): + super().__init__(model, n_params_subnet) + self.likelihood = likelihood + self.swag_n_snapshots = swag_n_snapshots + self.swag_snapshot_freq = swag_snapshot_freq + self.swag_lr = swag_lr + + def compute_param_scores(self, train_loader): + if self.likelihood == 'classification': + criterion = CrossEntropyLoss(reduction='mean') + elif self.likelihood == 'regression': + criterion = MSELoss(reduction='mean') + param_variances = fit_diagonal_swag(self.model, train_loader, criterion, n_snapshots_total=self.swag_n_snapshots, snapshot_freq=self.swag_snapshot_freq, lr=self.swag_lr) + return param_variances + + class ParamNameSubnetMask(SubnetMask): """Subnetwork mask corresponding to the specified parameters of the neural network. diff --git a/laplace/swag.py b/laplace/swag.py new file mode 100644 index 00000000..c8461fbb --- /dev/null +++ b/laplace/swag.py @@ -0,0 +1,82 @@ +from copy import deepcopy +from tqdm import tqdm + +import torch +from torch.nn.utils import parameters_to_vector + + +def param_vector(model): + return parameters_to_vector(model.parameters()).detach() + + +def fit_diagonal_swag(model, train_loader, criterion, n_snapshots_total=40, snapshot_freq=1, lr=0.01, momentum=0.9, weight_decay=3e-4, min_var=1e-30): + """ + Fit diagonal SWAG [1], which estimates marginal variances of model parameters by + computing the first and second moment of SGD iterates with a large learning rate. + + Implementation partly adapted from: + - https://github.com/wjmaddox/swa_gaussian/blob/master/swag/posteriors/swag.py + - https://github.com/wjmaddox/swa_gaussian/blob/master/experiments/train/run_swag.py + + References + ---------- + [1] Maddox, W., Garipov, T., Izmailov, P., Vetrov, D., Wilson, AG. + [*A Simple Baseline for Bayesian Uncertainty in Deep Learning*](https://arxiv.org/abs/1902.02476). + NeurIPS 2019. 
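A self-contained sanity check (not part of the patch) that the streaming first- and second-moment updates used by `fit_diagonal_swag` reproduce the plain sample moments of the collected snapshots, so that `sq_mean - mean**2` is the (biased) sample variance of the SGD iterates:

```python
import torch

# eight fake parameter snapshots in place of SGD iterates
snapshots = [torch.randn(5, dtype=torch.double) for _ in range(8)]

mean = torch.zeros(5, dtype=torch.double)
sq_mean = torch.zeros(5, dtype=torch.double)
for n, theta in enumerate(snapshots):
    # same running updates as in fit_diagonal_swag
    mean = mean * n / (n + 1) + theta / (n + 1)
    sq_mean = sq_mean * n / (n + 1) + theta ** 2 / (n + 1)

stacked = torch.stack(snapshots)
assert torch.allclose(mean, stacked.mean(0))
assert torch.allclose(sq_mean - mean ** 2, stacked.var(0, unbiased=False))
```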
+ + Parameters + ---------- + model : torch.nn.Module + train_loader : torch.data.utils.DataLoader + training data loader to use for snapshot collection + criterion : torch.nn.CrossEntropyLoss or torch.nn.MSELoss + loss function to use for snapshot collection + n_snapshots_total : int + total number of model snapshots to collect + snapshot_freq : int + snapshot collection frequency (in epochs) + lr : float + SGD learning rate for collecting snapshots + momentum : float + SGD momentum + weight_decay : float + SGD weight decay + min_var : float + minimum parameter variance to clamp to (for numerical stability) + + Returns + ------- + param_variances : torch.Tensor + vector of marginal variances for each model parameter + """ + + # create a copy of the model to avoid undesired changes to the original model parameters + _model = deepcopy(model) + _model.train() + device = next(_model.parameters()).device + + # initialize running estimates of first and second moment of model parameters + mean = torch.zeros_like(param_vector(_model)) + sq_mean = torch.zeros_like(param_vector(_model)) + n_snapshots = 0 + + # run SGD to collect model snapshots + optimizer = torch.optim.SGD(_model.parameters(), lr=lr, momentum=momentum, weight_decay=weight_decay) + n_epochs = snapshot_freq * n_snapshots_total + for epoch in tqdm(range(n_epochs)): + for inputs, targets in train_loader: + inputs, targets = inputs.to(device), targets.to(device) + optimizer.zero_grad() + loss = criterion(_model(inputs), targets) + loss.backward() + optimizer.step() + + if epoch % snapshot_freq == 0: + # update running estimates of first and second moment of model parameters + mean = mean * n_snapshots / (n_snapshots + 1) + param_vector(_model) / (n_snapshots + 1) + sq_mean = sq_mean * n_snapshots / (n_snapshots + 1) + param_vector(_model) ** 2 / (n_snapshots + 1) + n_snapshots += 1 + + # compute marginal parameter variances, Var[P] = E[P^2] - E[P]^2 + param_variances = torch.clamp(sq_mean - mean ** 2, min_var) + return param_variances diff --git a/tests/test_subnetlaplace.py b/tests/test_subnetlaplace.py index 3d15e736..1b10bb25 100644 --- a/tests/test_subnetlaplace.py +++ b/tests/test_subnetlaplace.py @@ -7,12 +7,12 @@ from torch.utils.data import DataLoader, TensorDataset from laplace import Laplace, SubnetLaplace -from laplace.subnetmask import SubnetMask, RandomSubnetMask, LargestMagnitudeSubnetMask, LargestVarianceDiagLaplaceSubnetMask, ParamNameSubnetMask, ModuleNameSubnetMask, LastLayerSubnetMask +from laplace.subnetmask import SubnetMask, RandomSubnetMask, LargestMagnitudeSubnetMask, LargestVarianceDiagLaplaceSubnetMask, LargestVarianceSWAGSubnetMask, ParamNameSubnetMask, ModuleNameSubnetMask, LastLayerSubnetMask torch.manual_seed(240) torch.set_default_tensor_type(torch.DoubleTensor) -score_based_subnet_masks = [RandomSubnetMask, LargestMagnitudeSubnetMask, LargestVarianceDiagLaplaceSubnetMask] +score_based_subnet_masks = [RandomSubnetMask, LargestMagnitudeSubnetMask, LargestVarianceDiagLaplaceSubnetMask, LargestVarianceSWAGSubnetMask] layer_subnet_masks = [ParamNameSubnetMask, ModuleNameSubnetMask, LastLayerSubnetMask] all_subnet_masks = score_based_subnet_masks + layer_subnet_masks likelihoods = ['classification', 'regression'] @@ -66,7 +66,7 @@ def test_subnet_laplace_init(model, likelihood): def test_score_based_subnet_masks(model, likelihood, subnetwork_mask, class_loader, reg_loader): loader = class_loader if likelihood == 'classification' else reg_loader model_params = parameters_to_vector(model.parameters()) - 
subnetmask_kwargs = dict() + subnetmask_kwargs = dict(likelihood=likelihood) if subnetwork_mask == LargestVarianceSWAGSubnetMask else dict() # should raise error if we don't pass number of subnet parameters within the subnetmask_kwargs with pytest.raises(TypeError): @@ -300,6 +300,7 @@ def test_regression_predictive(model, reg_loader, subnetwork_mask): subnetmask_kwargs = dict(module_names=['0']) else: subnetmask_kwargs = dict() + subnetmask_kwargs.update(dict(likelihood='regression') if subnetwork_mask == LargestVarianceSWAGSubnetMask else dict()) lap = Laplace(model, likelihood='regression', subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) assert isinstance(lap, SubnetLaplace) assert isinstance(lap._subnetwork_mask, subnetwork_mask) @@ -335,6 +336,7 @@ def test_classification_predictive(model, class_loader, subnetwork_mask): subnetmask_kwargs = dict(module_names=['0']) else: subnetmask_kwargs = dict() + subnetmask_kwargs.update(dict(likelihood='classification') if subnetwork_mask == LargestVarianceSWAGSubnetMask else dict()) lap = Laplace(model, likelihood='classification', subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) assert isinstance(lap, SubnetLaplace) assert isinstance(lap._subnetwork_mask, subnetwork_mask) From e91646f2d1512128ee86acf99a532cdfad6100c8 Mon Sep 17 00:00:00 2001 From: "Erik A. Daxberger" Date: Tue, 21 Dec 2021 09:58:33 +0000 Subject: [PATCH 25/49] Remove tqdm dependency in SWAG --- laplace/swag.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/laplace/swag.py b/laplace/swag.py index c8461fbb..a9abf42b 100644 --- a/laplace/swag.py +++ b/laplace/swag.py @@ -1,5 +1,4 @@ from copy import deepcopy -from tqdm import tqdm import torch from torch.nn.utils import parameters_to_vector @@ -63,7 +62,7 @@ def fit_diagonal_swag(model, train_loader, criterion, n_snapshots_total=40, snap # run SGD to collect model snapshots optimizer = torch.optim.SGD(_model.parameters(), lr=lr, momentum=momentum, weight_decay=weight_decay) n_epochs = snapshot_freq * n_snapshots_total - for epoch in tqdm(range(n_epochs)): + for epoch in range(n_epochs): for inputs, targets in train_loader: inputs, targets = inputs.to(device), targets.to(device) optimizer.zero_grad() From 3a48e021efd69252096748eb6ad460bb1dd37c47 Mon Sep 17 00:00:00 2001 From: "Erik A. Daxberger" Date: Tue, 21 Dec 2021 10:24:31 +0000 Subject: [PATCH 26/49] Set H=None in SubnetLaplace before calling super() constructor for compatibility with fixed H init --- laplace/subnetlaplace.py | 1 + 1 file changed, 1 insertion(+) diff --git a/laplace/subnetlaplace.py b/laplace/subnetlaplace.py index 16ed87cb..3aeb2454 100644 --- a/laplace/subnetlaplace.py +++ b/laplace/subnetlaplace.py @@ -74,6 +74,7 @@ class SubnetLaplace(FullLaplace): def __init__(self, model, likelihood, subnetwork_mask, sigma_noise=1., prior_precision=1., prior_mean=0., temperature=1., backend=BackPackGGN, backend_kwargs=None, subnetmask_kwargs=None): + self.H = None super().__init__(model, likelihood, sigma_noise=sigma_noise, prior_precision=prior_precision, prior_mean=prior_mean, temperature=temperature, backend=backend, backend_kwargs=backend_kwargs) From 7ae867f46a2e194c0a0a09be112eafca83e2c23c Mon Sep 17 00:00:00 2001 From: "Erik A. 
Daxberger" Date: Tue, 21 Dec 2021 10:43:17 +0000 Subject: [PATCH 27/49] Change indentation from tabs to spaces --- laplace/subnetmask.py | 8 +-- laplace/swag.py | 116 +++++++++++++++++++++--------------------- 2 files changed, 62 insertions(+), 62 deletions(-) diff --git a/laplace/subnetmask.py b/laplace/subnetmask.py index 36a135bd..939cff97 100644 --- a/laplace/subnetmask.py +++ b/laplace/subnetmask.py @@ -185,10 +185,10 @@ class LargestVarianceSWAGSubnetMask(ScoreBasedSubnetMask): number of parameters in the subnetwork (i.e. number of top-scoring parameters to select) likelihood : str 'classification' or 'regression' - swag_n_snapshots : int - number of model snapshots to collect for SWAG - swag_snapshot_freq : int - SWAG snapshot collection frequency (in epochs) + swag_n_snapshots : int + number of model snapshots to collect for SWAG + swag_snapshot_freq : int + SWAG snapshot collection frequency (in epochs) swag_lr : float learning rate for SWAG snapshot collection """ diff --git a/laplace/swag.py b/laplace/swag.py index a9abf42b..d780fe16 100644 --- a/laplace/swag.py +++ b/laplace/swag.py @@ -5,17 +5,17 @@ def param_vector(model): - return parameters_to_vector(model.parameters()).detach() + return parameters_to_vector(model.parameters()).detach() def fit_diagonal_swag(model, train_loader, criterion, n_snapshots_total=40, snapshot_freq=1, lr=0.01, momentum=0.9, weight_decay=3e-4, min_var=1e-30): - """ - Fit diagonal SWAG [1], which estimates marginal variances of model parameters by - computing the first and second moment of SGD iterates with a large learning rate. - - Implementation partly adapted from: - - https://github.com/wjmaddox/swa_gaussian/blob/master/swag/posteriors/swag.py - - https://github.com/wjmaddox/swa_gaussian/blob/master/experiments/train/run_swag.py + """ + Fit diagonal SWAG [1], which estimates marginal variances of model parameters by + computing the first and second moment of SGD iterates with a large learning rate. + + Implementation partly adapted from: + - https://github.com/wjmaddox/swa_gaussian/blob/master/swag/posteriors/swag.py + - https://github.com/wjmaddox/swa_gaussian/blob/master/experiments/train/run_swag.py References ---------- @@ -23,59 +23,59 @@ def fit_diagonal_swag(model, train_loader, criterion, n_snapshots_total=40, snap [*A Simple Baseline for Bayesian Uncertainty in Deep Learning*](https://arxiv.org/abs/1902.02476). NeurIPS 2019. 
- Parameters - ---------- - model : torch.nn.Module - train_loader : torch.data.utils.DataLoader - training data loader to use for snapshot collection - criterion : torch.nn.CrossEntropyLoss or torch.nn.MSELoss - loss function to use for snapshot collection - n_snapshots_total : int - total number of model snapshots to collect - snapshot_freq : int - snapshot collection frequency (in epochs) - lr : float - SGD learning rate for collecting snapshots - momentum : float - SGD momentum - weight_decay : float - SGD weight decay - min_var : float - minimum parameter variance to clamp to (for numerical stability) + Parameters + ---------- + model : torch.nn.Module + train_loader : torch.data.utils.DataLoader + training data loader to use for snapshot collection + criterion : torch.nn.CrossEntropyLoss or torch.nn.MSELoss + loss function to use for snapshot collection + n_snapshots_total : int + total number of model snapshots to collect + snapshot_freq : int + snapshot collection frequency (in epochs) + lr : float + SGD learning rate for collecting snapshots + momentum : float + SGD momentum + weight_decay : float + SGD weight decay + min_var : float + minimum parameter variance to clamp to (for numerical stability) - Returns - ------- - param_variances : torch.Tensor - vector of marginal variances for each model parameter - """ + Returns + ------- + param_variances : torch.Tensor + vector of marginal variances for each model parameter + """ - # create a copy of the model to avoid undesired changes to the original model parameters - _model = deepcopy(model) - _model.train() - device = next(_model.parameters()).device + # create a copy of the model to avoid undesired changes to the original model parameters + _model = deepcopy(model) + _model.train() + device = next(_model.parameters()).device - # initialize running estimates of first and second moment of model parameters - mean = torch.zeros_like(param_vector(_model)) - sq_mean = torch.zeros_like(param_vector(_model)) - n_snapshots = 0 + # initialize running estimates of first and second moment of model parameters + mean = torch.zeros_like(param_vector(_model)) + sq_mean = torch.zeros_like(param_vector(_model)) + n_snapshots = 0 - # run SGD to collect model snapshots - optimizer = torch.optim.SGD(_model.parameters(), lr=lr, momentum=momentum, weight_decay=weight_decay) - n_epochs = snapshot_freq * n_snapshots_total - for epoch in range(n_epochs): - for inputs, targets in train_loader: - inputs, targets = inputs.to(device), targets.to(device) - optimizer.zero_grad() - loss = criterion(_model(inputs), targets) - loss.backward() - optimizer.step() + # run SGD to collect model snapshots + optimizer = torch.optim.SGD(_model.parameters(), lr=lr, momentum=momentum, weight_decay=weight_decay) + n_epochs = snapshot_freq * n_snapshots_total + for epoch in range(n_epochs): + for inputs, targets in train_loader: + inputs, targets = inputs.to(device), targets.to(device) + optimizer.zero_grad() + loss = criterion(_model(inputs), targets) + loss.backward() + optimizer.step() - if epoch % snapshot_freq == 0: - # update running estimates of first and second moment of model parameters - mean = mean * n_snapshots / (n_snapshots + 1) + param_vector(_model) / (n_snapshots + 1) - sq_mean = sq_mean * n_snapshots / (n_snapshots + 1) + param_vector(_model) ** 2 / (n_snapshots + 1) - n_snapshots += 1 + if epoch % snapshot_freq == 0: + # update running estimates of first and second moment of model parameters + mean = mean * n_snapshots / (n_snapshots + 1) + 
param_vector(_model) / (n_snapshots + 1) + sq_mean = sq_mean * n_snapshots / (n_snapshots + 1) + param_vector(_model) ** 2 / (n_snapshots + 1) + n_snapshots += 1 - # compute marginal parameter variances, Var[P] = E[P^2] - E[P]^2 - param_variances = torch.clamp(sq_mean - mean ** 2, min_var) - return param_variances + # compute marginal parameter variances, Var[P] = E[P^2] - E[P]^2 + param_variances = torch.clamp(sq_mean - mean ** 2, min_var) + return param_variances From 015e91781933c0fc2669986d78e96f8af76943ce Mon Sep 17 00:00:00 2001 From: "Erik A. Daxberger" Date: Tue, 21 Dec 2021 11:51:55 +0000 Subject: [PATCH 28/49] Update README: include subnetwork and low-rank Laplace & update paper reference to NeurIPS'21 --- README.md | 33 +++++++++++++++++++++------------ 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 079d1229..7d72853d 100644 --- a/README.md +++ b/README.md @@ -4,17 +4,17 @@ [![Main](https://travis-ci.com/AlexImmer/Laplace.svg?token=rpuRxEjQS6cCZi7ptL9y&branch=main)](https://travis-ci.com/AlexImmer/Laplace) -The laplace package facilitates the application of Laplace approximations for entire neural networks or just their last layer. +The laplace package facilitates the application of Laplace approximations for entire neural networks (NNs), subnetworks of NNs, or just their last layer. The package enables posterior approximations, marginal-likelihood estimation, and various posterior predictive computations. The library documentation is available at [https://aleximmer.github.io/Laplace](https://aleximmer.github.io/Laplace). There is also a corresponding paper, [*Laplace Redux — Effortless Bayesian Deep Learning*](https://arxiv.org/abs/2106.14806), which introduces the library, provides an introduction to the Laplace approximation, reviews its use in deep learning, and empirically demonstrates its versatility and competitiveness. Please consider referring to the paper when using our library: ```bibtex -@article{daxberger2021laplace, - title={Laplace Redux--Effortless Bayesian Deep Learning}, - author={Daxberger, Erik and Kristiadi, Agustinus and Immer, Alexander - and Eschenhagen, Runa and Bauer, Matthias and Hennig, Philipp}, - journal={arXiv preprint arXiv:2106.14806}, +@inproceedings{laplace2021, + title={Laplace Redux--Effortless {B}ayesian Deep Learning}, + author={Erik Daxberger and Agustinus Kristiadi and Alexander Immer + and Runa Eschenhagen and Matthias Bauer and Philipp Hennig}, + booktitle={{N}eur{IPS}}, year={2021} } ``` @@ -39,7 +39,7 @@ pytest tests/ ## Structure The laplace package consists of two main components: -1. The subclasses of [`laplace.BaseLaplace`](https://github.com/AlexImmer/Laplace/blob/main/laplace/baselaplace.py) that implement different sparsity structures: different subsets of weights (`'all'` and `'last_layer'`) and different structures of the Hessian approximation (`'full'`, `'kron'`, and `'diag'`). This results in six currently available options: `laplace.FullLaplace`, `laplace.KronLaplace`, `laplace.DiagLaplace`, and the corresponding last-layer variations `laplace.FullLLLaplace`, `laplace.KronLLLaplace`, and `laplace.DiagLLLaplace`, which are all subclasses of [`laplace.LLLaplace`](https://github.com/AlexImmer/Laplace/blob/main/laplace/lllaplace.py). All of these can be conveniently accessed via the [`laplace.Laplace`](https://github.com/AlexImmer/Laplace/blob/main/laplace/laplace.py) function. +1. 
The subclasses of [`laplace.BaseLaplace`](https://github.com/AlexImmer/Laplace/blob/main/laplace/baselaplace.py) that implement different sparsity structures: different subsets of weights (`'all'`, `'subnetwork'` and `'last_layer'`) and different structures of the Hessian approximation (`'full'`, `'kron'`, `'lowrank'` and `'diag'`). This results in eight currently available options: `laplace.FullLaplace`, `laplace.KronLaplace`, `laplace.DiagLaplace`, the corresponding last-layer variations `laplace.FullLLLaplace`, `laplace.KronLLLaplace`, and `laplace.DiagLLLaplace` (which are all subclasses of [`laplace.LLLaplace`](https://github.com/AlexImmer/Laplace/blob/main/laplace/lllaplace.py)), `laplace.SubnetLaplace` (which only supports a `'full'` Hessian approximation) and `laplace.LowRankLaplace` (which only supports inference over `'all'` weights). All of these can be conveniently accessed via the [`laplace.Laplace`](https://github.com/AlexImmer/Laplace/blob/main/laplace/laplace.py) function. 2. The backends in [`laplace.curvature`](https://github.com/AlexImmer/Laplace/blob/main/laplace/curvature/) which provide access to Hessian approximations of the corresponding sparsity structures, for example, the diagonal GGN. @@ -48,9 +48,15 @@ decomposing a neural network into feature extractor and last layer for `LLLaplac and effectively dealing with Kronecker factors ([`laplace.matrix`](https://github.com/AlexImmer/Laplace/blob/main/laplace/matrix.py)). +Finally, the package implements several options to select/specify the subnetwork for `laplace.SubnetLaplace` (as subclasses of ([`laplace.subnetmask.SubnetMask`](https://github.com/AlexImmer/Laplace/blob/main/laplace/subnetmask.py)). +Automatic subnetwork selection strategies include: uniformly at random (`laplace.subnetmask.RandomSubnetMask`), by largest parameter magnitudes (`laplace.subnetmask.LargestMagnitudeSubnetMask`) and by largest marginal parameter variances (`laplace.subnetmask.LargestVarianceDiagLaplaceSubnetMask` and `laplace.subnetmask.LargestVarianceSWAGSubnetMask`). +In addition to that, subnetworks can also be specified manually, by listing the names of either the model parameters (`laplace.subnetmask.ParamNameSubnetMask`) or modules (`laplace.subnetmask.ModuleNameSubnetMask`) to perform Laplace inference over. + ## Extendability To extend the laplace package, new `BaseLaplace` subclasses can be designed, for example, -a block-diagonal structure or subset-of-weights Laplace. +Laplace with a block-diagonal Hessian structure. +One can also implement custom subnetwork selection strategies as new subclasses of `SubnetMask`. + Alternatively, extending or integrating backends (subclasses of [`curvature.curvature`](https://github.com/AlexImmer/Laplace/blob/main/laplace/curvature/curvature.py)) allows to provide different Hessian approximations to the Laplace approximations. For example, currently the [`curvature.BackPackInterface`](https://github.com/AlexImmer/Laplace/blob/main/laplace/curvature/backpack.py) based on [BackPACK](https://github.com/f-dangel/backpack/) and [`curvature.AsdlInterface`](https://github.com/AlexImmer/Laplace/blob/main/laplace/curvature/asdl.py) based on [ASDL](https://github.com/kazukiosawa/asdfghjkl) are available. @@ -60,10 +66,11 @@ for a regression (MSELoss) loss function. 
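A corresponding sketch for the subnetwork case described above, assuming a trained `model`, a `train_loader`, a test input `x`, and a module named `'fc2'` (all hypothetical names):

```python
from laplace import Laplace
from laplace.subnetmask import ModuleNameSubnetMask

# Laplace inference over the parameters of module 'fc2' only
la = Laplace(model, likelihood='classification',
             subset_of_weights='subnetwork',
             subnetwork_mask=ModuleNameSubnetMask,
             hessian_structure='full',
             subnetmask_kwargs=dict(module_names=['fc2']))
la.fit(train_loader)

pred = la(x, pred_type='glm', link_approx='probit')
```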
## Example usage -### *Post-hoc* prior precision tuning of last-layer LA +### *Post-hoc* prior precision tuning of diagonal LA In the following example, a pre-trained model is loaded, -then the Laplace approximation is fit to the training data, +then the Laplace approximation is fit to the training data +(using a diagonal Hessian approximation over all parameters), and the prior precision is optimized with cross-validation `'CV'`. After that, the resulting LA is used for prediction with the `'probit'` predictive for classification. @@ -122,7 +129,7 @@ pdoc --http 0.0.0.0:8080 laplace --template-dir template ## References -This package relies on various improvements to the Laplace approximation for neural networks, which was originally due to MacKay [1]. +This package relies on various improvements to the Laplace approximation for neural networks, which was originally due to MacKay [1]. Please consider citing the respective papers if you use any of their proposed methods via our laplace library. - [1] MacKay, DJC. [*A Practical Bayesian Framework for Backpropagation Networks*](https://authors.library.caltech.edu/13793/). Neural Computation 1992. - [2] Gibbs, M. N. [*Bayesian Gaussian Processes for Regression and Classification*](https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.147.1130&rep=rep1&type=pdf). PhD Thesis 1997. @@ -132,4 +139,6 @@ This package relies on various improvements to the Laplace approximation for neu - [6] Khan, M. E., Immer, A., Abedi, E., Korzepa, M. [*Approximate Inference Turns Deep Networks into Gaussian Processes*](https://arxiv.org/abs/1906.01930). NeurIPS 2019. - [7] Kristiadi, A., Hein, M., Hennig, P. [*Being Bayesian, Even Just a Bit, Fixes Overconfidence in ReLU Networks*](https://arxiv.org/abs/2002.10118). ICML 2020. - [8] Immer, A., Korzepa, M., Bauer, M. [*Improving predictions of Bayesian neural nets via local linearization*](https://arxiv.org/abs/2008.08400). AISTATS 2021. -- [9] Immer, A., Bauer, M., Fortuin, V., Rätsch, G., Khan, EM. [*Scalable Marginal Likelihood Estimation for Model Selection in Deep Learning*](https://arxiv.org/abs/2104.04975). ICML 2021. +- [9] Sharma, A., Azizan, N., Pavone, M. [*Sketching Curvature for Efficient Out-of-Distribution Detection for Deep Neural Networks*](https://arxiv.org/abs/2102.12567). UAI 2021. +- [10] Immer, A., Bauer, M., Fortuin, V., Rätsch, G., Khan, EM. [*Scalable Marginal Likelihood Estimation for Model Selection in Deep Learning*](https://arxiv.org/abs/2104.04975). ICML 2021. +- [11] Daxberger, E., Nalisnick, E., Allingham, JU., Antorán, J., Hernández-Lobato, JM. [*Bayesian Deep Learning via Subnetwork Inference*](https://arxiv.org/abs/2010.14689). ICML 2021. \ No newline at end of file From fce8545d76a3b53a56a112e909460b1f0efe1666 Mon Sep 17 00:00:00 2001 From: "Erik A. Daxberger" Date: Tue, 21 Dec 2021 11:57:33 +0000 Subject: [PATCH 29/49] Minor changes to README --- README.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 7d72853d..1773728e 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ [![Main](https://travis-ci.com/AlexImmer/Laplace.svg?token=rpuRxEjQS6cCZi7ptL9y&branch=main)](https://travis-ci.com/AlexImmer/Laplace) -The laplace package facilitates the application of Laplace approximations for entire neural networks (NNs), subnetworks of NNs, or just their last layer. +The laplace package facilitates the application of Laplace approximations for entire neural networks, subnetworks of neural networks, or just their last layer. 
The package enables posterior approximations, marginal-likelihood estimation, and various posterior predictive computations. The library documentation is available at [https://aleximmer.github.io/Laplace](https://aleximmer.github.io/Laplace). @@ -39,7 +39,7 @@ pytest tests/ ## Structure The laplace package consists of two main components: -1. The subclasses of [`laplace.BaseLaplace`](https://github.com/AlexImmer/Laplace/blob/main/laplace/baselaplace.py) that implement different sparsity structures: different subsets of weights (`'all'`, `'subnetwork'` and `'last_layer'`) and different structures of the Hessian approximation (`'full'`, `'kron'`, `'lowrank'` and `'diag'`). This results in eight currently available options: `laplace.FullLaplace`, `laplace.KronLaplace`, `laplace.DiagLaplace`, the corresponding last-layer variations `laplace.FullLLLaplace`, `laplace.KronLLLaplace`, and `laplace.DiagLLLaplace` (which are all subclasses of [`laplace.LLLaplace`](https://github.com/AlexImmer/Laplace/blob/main/laplace/lllaplace.py)), `laplace.SubnetLaplace` (which only supports a `'full'` Hessian approximation) and `laplace.LowRankLaplace` (which only supports inference over `'all'` weights). All of these can be conveniently accessed via the [`laplace.Laplace`](https://github.com/AlexImmer/Laplace/blob/main/laplace/laplace.py) function. +1. The subclasses of [`laplace.BaseLaplace`](https://github.com/AlexImmer/Laplace/blob/main/laplace/baselaplace.py) that implement different sparsity structures: different subsets of weights (`'all'`, `'subnetwork'` and `'last_layer'`) and different structures of the Hessian approximation (`'full'`, `'kron'`, `'lowrank'` and `'diag'`). This results in _eight_ currently available options: `laplace.FullLaplace`, `laplace.KronLaplace`, `laplace.DiagLaplace`, the corresponding last-layer variations `laplace.FullLLLaplace`, `laplace.KronLLLaplace`, and `laplace.DiagLLLaplace` (which are all subclasses of [`laplace.LLLaplace`](https://github.com/AlexImmer/Laplace/blob/main/laplace/lllaplace.py)), `laplace.SubnetLaplace` (which only supports a `'full'` Hessian approximation) and `laplace.LowRankLaplace` (which only supports inference over `'all'` weights). All of these can be conveniently accessed via the [`laplace.Laplace`](https://github.com/AlexImmer/Laplace/blob/main/laplace/laplace.py) function. 2. The backends in [`laplace.curvature`](https://github.com/AlexImmer/Laplace/blob/main/laplace/curvature/) which provide access to Hessian approximations of the corresponding sparsity structures, for example, the diagonal GGN. @@ -48,9 +48,9 @@ decomposing a neural network into feature extractor and last layer for `LLLaplac and effectively dealing with Kronecker factors ([`laplace.matrix`](https://github.com/AlexImmer/Laplace/blob/main/laplace/matrix.py)). -Finally, the package implements several options to select/specify the subnetwork for `laplace.SubnetLaplace` (as subclasses of ([`laplace.subnetmask.SubnetMask`](https://github.com/AlexImmer/Laplace/blob/main/laplace/subnetmask.py)). -Automatic subnetwork selection strategies include: uniformly at random (`laplace.subnetmask.RandomSubnetMask`), by largest parameter magnitudes (`laplace.subnetmask.LargestMagnitudeSubnetMask`) and by largest marginal parameter variances (`laplace.subnetmask.LargestVarianceDiagLaplaceSubnetMask` and `laplace.subnetmask.LargestVarianceSWAGSubnetMask`). 
-In addition to that, subnetworks can also be specified manually, by listing the names of either the model parameters (`laplace.subnetmask.ParamNameSubnetMask`) or modules (`laplace.subnetmask.ModuleNameSubnetMask`) to perform Laplace inference over. +Finally, the package implements several options to select/specify a subnetwork for `laplace.SubnetLaplace` (as subclasses of [`laplace.subnetmask.SubnetMask`](https://github.com/AlexImmer/Laplace/blob/main/laplace/subnetmask.py). +Automatic subnetwork selection strategies include: uniformly at random (`laplace.subnetmask.RandomSubnetMask`), by largest parameter magnitudes (`LargestMagnitudeSubnetMask`), and by largest marginal parameter variances (`LargestVarianceDiagLaplaceSubnetMask` and `LargestVarianceSWAGSubnetMask`). +In addition to that, subnetworks can also be specified manually, by listing the names of either the model parameters (`ParamNameSubnetMask`) or modules (`ModuleNameSubnetMask`) to perform Laplace inference over. ## Extendability To extend the laplace package, new `BaseLaplace` subclasses can be designed, for example, From 55e418a3b561f3a5c874aa8086a1350ecad61d2b Mon Sep 17 00:00:00 2001 From: "Erik A. Daxberger" Date: Mon, 3 Jan 2022 10:40:19 +0000 Subject: [PATCH 30/49] Move utility files to new utils directory --- README.md | 8 ++++---- laplace/baselaplace.py | 8 ++++---- laplace/curvature/asdl.py | 4 ++-- laplace/curvature/backpack.py | 2 +- laplace/curvature/curvature.py | 8 ++++---- laplace/lllaplace.py | 8 ++++---- laplace/marglik_training.py | 2 +- laplace/subnetlaplace.py | 6 +++--- laplace/{ => utils}/feature_extractor.py | 0 laplace/{ => utils}/matrix.py | 2 +- laplace/{ => utils}/subnetmask.py | 4 ++-- laplace/{ => utils}/swag.py | 0 laplace/{ => utils}/utils.py | 0 tests/test_baselaplace.py | 2 +- tests/test_feature_extractor.py | 2 +- tests/test_jacobians.py | 3 +-- tests/test_lllaplace.py | 4 ++-- tests/test_matrix.py | 6 +++--- tests/test_subnetlaplace.py | 2 +- tests/test_utils.py | 2 +- 20 files changed, 36 insertions(+), 37 deletions(-) rename laplace/{ => utils}/feature_extractor.py (100%) rename laplace/{ => utils}/matrix.py (99%) rename laplace/{ => utils}/subnetmask.py (99%) rename laplace/{ => utils}/swag.py (100%) rename laplace/{ => utils}/utils.py (100%) diff --git a/README.md b/README.md index 1773728e..fbe90ee0 100644 --- a/README.md +++ b/README.md @@ -44,12 +44,12 @@ The laplace package consists of two main components: the corresponding sparsity structures, for example, the diagonal GGN. Additionally, the package provides utilities for -decomposing a neural network into feature extractor and last layer for `LLLaplace` subclasses ([`laplace.feature_extractor`](https://github.com/AlexImmer/Laplace/blob/main/laplace/feature_extractor.py)) +decomposing a neural network into feature extractor and last layer for `LLLaplace` subclasses ([`laplace.utils.feature_extractor`](https://github.com/AlexImmer/Laplace/blob/main/laplace/utils/feature_extractor.py)) and -effectively dealing with Kronecker factors ([`laplace.matrix`](https://github.com/AlexImmer/Laplace/blob/main/laplace/matrix.py)). +effectively dealing with Kronecker factors ([`laplace.utils.matrix`](https://github.com/AlexImmer/Laplace/blob/main/laplace/utils/matrix.py)). -Finally, the package implements several options to select/specify a subnetwork for `laplace.SubnetLaplace` (as subclasses of [`laplace.subnetmask.SubnetMask`](https://github.com/AlexImmer/Laplace/blob/main/laplace/subnetmask.py). 
-Automatic subnetwork selection strategies include: uniformly at random (`laplace.subnetmask.RandomSubnetMask`), by largest parameter magnitudes (`LargestMagnitudeSubnetMask`), and by largest marginal parameter variances (`LargestVarianceDiagLaplaceSubnetMask` and `LargestVarianceSWAGSubnetMask`).
+Finally, the package implements several options to select/specify a subnetwork for `laplace.SubnetLaplace` (as subclasses of [`laplace.utils.subnetmask.SubnetMask`](https://github.com/AlexImmer/Laplace/blob/main/laplace/utils/subnetmask.py).
+Automatic subnetwork selection strategies include: uniformly at random (`laplace.utils.subnetmask.RandomSubnetMask`), by largest parameter magnitudes (`LargestMagnitudeSubnetMask`), and by largest marginal parameter variances (`LargestVarianceDiagLaplaceSubnetMask` and `LargestVarianceSWAGSubnetMask`).
 In addition to that, subnetworks can also be specified manually, by listing the names of either the model parameters (`ParamNameSubnetMask`) or modules (`ModuleNameSubnetMask`) to perform Laplace inference over.
 
 ## Extendability
 To extend the laplace package, new `BaseLaplace` subclasses can be designed, for example,
diff --git a/laplace/baselaplace.py b/laplace/baselaplace.py
index 522fe047..2c382d98 100644
--- a/laplace/baselaplace.py
+++ b/laplace/baselaplace.py
@@ -4,8 +4,8 @@
 from torch.nn.utils import parameters_to_vector, vector_to_parameters
 from torch.distributions import MultivariateNormal, Dirichlet, Normal
 
-from laplace.utils import parameters_per_layer, invsqrt_precision, get_nll, validate
-from laplace.matrix import Kron
+from laplace.utils.utils import parameters_per_layer, invsqrt_precision, get_nll, validate
+from laplace.utils.matrix import Kron
 from laplace.curvature import BackPackGGN, AsdlHessian
 
 
@@ -754,7 +754,7 @@ class KronLaplace(ParametricLaplace):
     Mathematically, we have for each parameter group, e.g., torch.nn.Module,
     that \\(P \\approx Q \\otimes H\\).
     See `BaseLaplace` for the full interface and see
-    `laplace.matrix.Kron` and `laplace.matrix.KronDecomposed` for the structure of
+    `laplace.utils.matrix.Kron` and `laplace.utils.matrix.KronDecomposed` for the structure of
     the Kronecker factors. `Kron` is used to aggregate factors by summing up and
     `KronDecomposed` is used to add the prior, a Hessian factor (e.g. temperature),
     and to compute posterior covariances, marginal likelihood, etc.
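The `KronDecomposed` machinery referenced in this docstring rests on a standard identity: once each factor is eigendecomposed, adding the prior and inverting only ever touches a diagonal matrix. A minimal sketch, assuming a single Kronecker-factored block and a scalar prior precision \(\delta\) (notation illustrative):

```latex
% With eigendecompositions Q = U \Lambda_Q U^\top and H = V \Lambda_H V^\top,
% and U \otimes V orthogonal, the posterior precision of one block is
Q \otimes H + \delta I
  = (U \otimes V)\,\bigl(\Lambda_Q \otimes \Lambda_H + \delta I\bigr)\,(U \otimes V)^{\top},
% so its log-determinant reduces to a sum over eigenvalue pairs:
\log\det\bigl(Q \otimes H + \delta I\bigr)
  = \sum_{i,j} \log\bigl(\lambda^{Q}_{i}\,\lambda^{H}_{j} + \delta\bigr).
```

Posterior covariances and marginal-likelihood terms therefore require only the per-factor eigendecompositions, never the materialized Kronecker product.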
@@ -812,7 +812,7 @@ def posterior_precision(self): Returns ------- - precision : `laplace.matrix.KronDecomposed` + precision : `laplace.utils.matrix.KronDecomposed` """ self._check_H_init() return self.H * self._H_factor + self.prior_precision diff --git a/laplace/curvature/asdl.py b/laplace/curvature/asdl.py index fc7eda9a..a3ccd3d6 100644 --- a/laplace/curvature/asdl.py +++ b/laplace/curvature/asdl.py @@ -9,8 +9,8 @@ from asdfghjkl.gradient import batch_gradient from laplace.curvature import CurvatureInterface, GGNInterface, EFInterface -from laplace.matrix import Kron -from laplace.utils import _is_batchnorm +from laplace.utils.matrix import Kron +from laplace.utils.utils import _is_batchnorm EPS = 1e-6 diff --git a/laplace/curvature/backpack.py b/laplace/curvature/backpack.py index a0885800..5c78a093 100644 --- a/laplace/curvature/backpack.py +++ b/laplace/curvature/backpack.py @@ -5,7 +5,7 @@ from backpack.context import CTX from laplace.curvature import CurvatureInterface, GGNInterface, EFInterface -from laplace.matrix import Kron +from laplace.utils.matrix import Kron class BackPackInterface(CurvatureInterface): diff --git a/laplace/curvature/curvature.py b/laplace/curvature/curvature.py index 98b703b7..72b0a041 100644 --- a/laplace/curvature/curvature.py +++ b/laplace/curvature/curvature.py @@ -11,7 +11,7 @@ class CurvatureInterface: Parameters ---------- - model : torch.nn.Module or `laplace.feature_extractor.FeatureExtractor` + model : torch.nn.Module or `laplace.utils.feature_extractor.FeatureExtractor` torch model (neural network) likelihood : {'classification', 'regression'} last_layer : bool, default=False @@ -143,7 +143,7 @@ def kron(self, x, y, **kwargs): Returns ------- loss : torch.Tensor - H : `laplace.matrix.Kron` + H : `laplace.utils.matrix.Kron` Kronecker factored Hessian approximation. 
""" raise NotImplementedError @@ -175,7 +175,7 @@ class GGNInterface(CurvatureInterface): Parameters ---------- - model : torch.nn.Module or `laplace.feature_extractor.FeatureExtractor` + model : torch.nn.Module or `laplace.utils.feature_extractor.FeatureExtractor` torch model (neural network) likelihood : {'classification', 'regression'} last_layer : bool, default=False @@ -254,7 +254,7 @@ class EFInterface(CurvatureInterface): Parameters ---------- - model : torch.nn.Module or `laplace.feature_extractor.FeatureExtractor` + model : torch.nn.Module or `laplace.utils.feature_extractor.FeatureExtractor` torch model (neural network) likelihood : {'classification', 'regression'} last_layer : bool, default=False diff --git a/laplace/lllaplace.py b/laplace/lllaplace.py index b00dbf55..8f93ff53 100644 --- a/laplace/lllaplace.py +++ b/laplace/lllaplace.py @@ -3,9 +3,9 @@ from torch.nn.utils import parameters_to_vector, vector_to_parameters from laplace.baselaplace import ParametricLaplace, FullLaplace, KronLaplace, DiagLaplace -from laplace.feature_extractor import FeatureExtractor +from laplace.utils.feature_extractor import FeatureExtractor -from laplace.matrix import Kron +from laplace.utils.matrix import Kron from laplace.curvature import BackPackGGN @@ -36,7 +36,7 @@ class LLLaplace(ParametricLaplace): Parameters ---------- - model : torch.nn.Module or `laplace.feature_extractor.FeatureExtractor` + model : torch.nn.Module or `laplace.utils.feature_extractor.FeatureExtractor` likelihood : {'classification', 'regression'} determines the log likelihood Hessian approximation sigma_noise : torch.Tensor or float, default=1 @@ -168,7 +168,7 @@ class KronLLLaplace(LLLaplace, KronLaplace): Mathematically, we have for the last parameter group, i.e., torch.nn.Linear, that \\P\\approx Q \\otimes H\\. See `KronLaplace`, `LLLaplace`, and `BaseLaplace` for the full interface and see - `laplace.matrix.Kron` and `laplace.matrix.KronDecomposed` for the structure of + `laplace.utils.matrix.Kron` and `laplace.utils.matrix.KronDecomposed` for the structure of the Kronecker factors. `Kron` is used to aggregate factors by summing up and `KronDecomposed` is used to add the prior, a Hessian factor (e.g. temperature), and computing posterior covariances, marginal likelihood, etc. 
diff --git a/laplace/marglik_training.py b/laplace/marglik_training.py index ec100542..3a2b2161 100644 --- a/laplace/marglik_training.py +++ b/laplace/marglik_training.py @@ -9,7 +9,7 @@ from laplace import Laplace from laplace.curvature import AsdlGGN -from laplace.utils import expand_prior_precision +from laplace.utils.utils import expand_prior_precision def marglik_training( diff --git a/laplace/subnetlaplace.py b/laplace/subnetlaplace.py index 3aeb2454..dc952d0a 100644 --- a/laplace/subnetlaplace.py +++ b/laplace/subnetlaplace.py @@ -4,7 +4,7 @@ from laplace.baselaplace import FullLaplace, DiagLaplace from laplace.curvature import BackPackGGN -from laplace.subnetmask import LargestVarianceDiagLaplaceSubnetMask +from laplace.utils.subnetmask import LargestVarianceDiagLaplaceSubnetMask __all__ = ['SubnetLaplace'] @@ -46,10 +46,10 @@ class SubnetLaplace(FullLaplace): Parameters ---------- - model : torch.nn.Module or `laplace.feature_extractor.FeatureExtractor` + model : torch.nn.Module or `laplace.utils.feature_extractor.FeatureExtractor` likelihood : {'classification', 'regression'} determines the log likelihood Hessian approximation - subnetwork_mask : subclasses of `laplace.subnetmask.SubnetMask`, default=None + subnetwork_mask : subclasses of `laplace.utils.subnetmask.SubnetMask`, default=None mask defining the subnetwork to apply the Laplace approximation over sigma_noise : torch.Tensor or float, default=1 observation noise for the regression setting; must be 1 for classification diff --git a/laplace/feature_extractor.py b/laplace/utils/feature_extractor.py similarity index 100% rename from laplace/feature_extractor.py rename to laplace/utils/feature_extractor.py diff --git a/laplace/matrix.py b/laplace/utils/matrix.py similarity index 99% rename from laplace/matrix.py rename to laplace/utils/matrix.py index 61c07ab5..30b0245d 100644 --- a/laplace/matrix.py +++ b/laplace/utils/matrix.py @@ -3,7 +3,7 @@ import numpy as np from typing import Union -from laplace.utils import _is_valid_scalar, symeig, kron, block_diag +from laplace.utils.utils import _is_valid_scalar, symeig, kron, block_diag class Kron: diff --git a/laplace/subnetmask.py b/laplace/utils/subnetmask.py similarity index 99% rename from laplace/subnetmask.py rename to laplace/utils/subnetmask.py index 939cff97..a5cdc3d0 100644 --- a/laplace/subnetmask.py +++ b/laplace/utils/subnetmask.py @@ -4,8 +4,8 @@ from torch.nn import CrossEntropyLoss, MSELoss from torch.nn.utils import parameters_to_vector -from laplace.feature_extractor import FeatureExtractor -from laplace.swag import fit_diagonal_swag +from laplace.utils.feature_extractor import FeatureExtractor +from laplace.utils.swag import fit_diagonal_swag __all__ = ['SubnetMask', 'RandomSubnetMask', 'LargestMagnitudeSubnetMask', 'LargestVarianceDiagLaplaceSubnetMask', 'LargestVarianceSWAGSubnetMask', 'ParamNameSubnetMask', 'ModuleNameSubnetMask', 'LastLayerSubnetMask'] diff --git a/laplace/swag.py b/laplace/utils/swag.py similarity index 100% rename from laplace/swag.py rename to laplace/utils/swag.py diff --git a/laplace/utils.py b/laplace/utils/utils.py similarity index 100% rename from laplace/utils.py rename to laplace/utils/utils.py diff --git a/tests/test_baselaplace.py b/tests/test_baselaplace.py index 75529be8..a9292e8d 100644 --- a/tests/test_baselaplace.py +++ b/tests/test_baselaplace.py @@ -12,7 +12,7 @@ from torchvision.models import wide_resnet50_2 from laplace.laplace import FullLaplace, KronLaplace, DiagLaplace, LowRankLaplace -from laplace.matrix import 
KronDecomposed +from laplace.utils.matrix import KronDecomposed from tests.utils import jacobians_naive diff --git a/tests/test_feature_extractor.py b/tests/test_feature_extractor.py index 37494d76..b80bbcb4 100644 --- a/tests/test_feature_extractor.py +++ b/tests/test_feature_extractor.py @@ -2,7 +2,7 @@ import torch.nn as nn import torchvision.models as models -from laplace.feature_extractor import FeatureExtractor +from laplace.utils.feature_extractor import FeatureExtractor class CNN(nn.Module): diff --git a/tests/test_jacobians.py b/tests/test_jacobians.py index 45cd2f37..0495adb3 100644 --- a/tests/test_jacobians.py +++ b/tests/test_jacobians.py @@ -1,10 +1,9 @@ import pytest import torch from torch import nn -from torch.nn.utils import parameters_to_vector from laplace.curvature import AsdlInterface, BackPackInterface -from laplace.feature_extractor import FeatureExtractor +from laplace.utils.feature_extractor import FeatureExtractor from tests.utils import jacobians_naive diff --git a/tests/test_lllaplace.py b/tests/test_lllaplace.py index 65fbf1a3..bc6f7a5e 100644 --- a/tests/test_lllaplace.py +++ b/tests/test_lllaplace.py @@ -8,8 +8,8 @@ from torch.distributions import Normal, Categorical from torchvision.models import wide_resnet50_2 -from laplace.lllaplace import LLLaplace, FullLLLaplace, KronLLLaplace, DiagLLLaplace -from laplace.feature_extractor import FeatureExtractor +from laplace.lllaplace import FullLLLaplace, KronLLLaplace, DiagLLLaplace +from laplace.utils.feature_extractor import FeatureExtractor from tests.utils import jacobians_naive diff --git a/tests/test_matrix.py b/tests/test_matrix.py index fb5bef1e..66a5da48 100644 --- a/tests/test_matrix.py +++ b/tests/test_matrix.py @@ -4,10 +4,10 @@ from torch import nn from torch.nn.utils import parameters_to_vector -from laplace.matrix import Kron, KronDecomposed -from laplace.utils import kron as kron_prod +from laplace.utils.matrix import Kron +from laplace.utils.utils import kron as kron_prod from laplace.curvature import BackPackGGN -from laplace.utils import block_diag +from laplace.utils.utils import block_diag from tests.utils import get_psd_matrix, jacobians_naive diff --git a/tests/test_subnetlaplace.py b/tests/test_subnetlaplace.py index 1b10bb25..9007a7a5 100644 --- a/tests/test_subnetlaplace.py +++ b/tests/test_subnetlaplace.py @@ -7,7 +7,7 @@ from torch.utils.data import DataLoader, TensorDataset from laplace import Laplace, SubnetLaplace -from laplace.subnetmask import SubnetMask, RandomSubnetMask, LargestMagnitudeSubnetMask, LargestVarianceDiagLaplaceSubnetMask, LargestVarianceSWAGSubnetMask, ParamNameSubnetMask, ModuleNameSubnetMask, LastLayerSubnetMask +from laplace.utils.subnetmask import SubnetMask, RandomSubnetMask, LargestMagnitudeSubnetMask, LargestVarianceDiagLaplaceSubnetMask, LargestVarianceSWAGSubnetMask, ParamNameSubnetMask, ModuleNameSubnetMask, LastLayerSubnetMask torch.manual_seed(240) diff --git a/tests/test_utils.py b/tests/test_utils.py index 1ad0f517..b673be3d 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -1,5 +1,5 @@ import torch -from laplace.utils import invsqrt_precision, diagonal_add_scalar, symeig +from laplace.utils.utils import invsqrt_precision, diagonal_add_scalar, symeig def test_sqrt_precision(): From 6044316168ad38cd77e017f1d6facb6745df42b7 Mon Sep 17 00:00:00 2001 From: "Erik A. 
Daxberger" Date: Mon, 3 Jan 2022 14:56:51 +0000 Subject: [PATCH 31/49] Change SubnetLaplace to take subnetwork indices instead of a subclass of SubnetMask --- laplace/subnetlaplace.py | 62 ++++----- laplace/utils/subnetmask.py | 13 +- tests/test_subnetlaplace.py | 243 ++++++++++++++++++++---------------- 3 files changed, 174 insertions(+), 144 deletions(-) diff --git a/laplace/subnetlaplace.py b/laplace/subnetlaplace.py index dc952d0a..25c5ae96 100644 --- a/laplace/subnetlaplace.py +++ b/laplace/subnetlaplace.py @@ -1,10 +1,8 @@ import torch from torch.distributions import MultivariateNormal -from laplace.baselaplace import FullLaplace, DiagLaplace - +from laplace.baselaplace import FullLaplace from laplace.curvature import BackPackGGN -from laplace.utils.subnetmask import LargestVarianceDiagLaplaceSubnetMask __all__ = ['SubnetLaplace'] @@ -49,8 +47,9 @@ class SubnetLaplace(FullLaplace): model : torch.nn.Module or `laplace.utils.feature_extractor.FeatureExtractor` likelihood : {'classification', 'regression'} determines the log likelihood Hessian approximation - subnetwork_mask : subclasses of `laplace.utils.subnetmask.SubnetMask`, default=None - mask defining the subnetwork to apply the Laplace approximation over + subnetwork_indices : torch.Tensor, default=None + indices of the vectorized model parameters that define the subnetwork + to apply the Laplace approximation over sigma_noise : torch.Tensor or float, default=1 observation noise for the regression setting; must be 1 for classification prior_precision : torch.Tensor or float, default=1 @@ -66,29 +65,38 @@ class SubnetLaplace(FullLaplace): backend_kwargs : dict, default=None arguments passed to the backend on initialization, for example to set the number of MC samples for stochastic approximations. - subnetmask_kwargs : dict, default=None - arguments passed to the subnetwork mask on initialization. 
""" # key to map to correct subclass of BaseLaplace, (subset of weights, Hessian structure) _key = ('subnetwork', 'full') - def __init__(self, model, likelihood, subnetwork_mask, sigma_noise=1., prior_precision=1., - prior_mean=0., temperature=1., backend=BackPackGGN, backend_kwargs=None, subnetmask_kwargs=None): + def __init__(self, model, likelihood, subnetwork_indices=None, sigma_noise=1., prior_precision=1., + prior_mean=0., temperature=1., backend=BackPackGGN, backend_kwargs=None): self.H = None super().__init__(model, likelihood, sigma_noise=sigma_noise, prior_precision=prior_precision, prior_mean=prior_mean, temperature=temperature, backend=backend, backend_kwargs=backend_kwargs) - self._subnetmask_kwargs = dict() if subnetmask_kwargs is None else subnetmask_kwargs - if subnetwork_mask == LargestVarianceDiagLaplaceSubnetMask: - # instantiate and pass diagonal Laplace model for largest variance subnetwork selection - self._subnetmask_kwargs.update(diag_laplace_model=DiagLaplace(self.model, likelihood, sigma_noise, - prior_precision, prior_mean, temperature, backend, backend_kwargs)) - self._subnetwork_mask = subnetwork_mask(self.model, **self._subnetmask_kwargs) - self.n_params_subnet = None + # check validity of subnetwork indices and pass them to backend + self._check_subnetwork_indices(subnetwork_indices) + self.backend.subnetwork_indices = subnetwork_indices + self.n_params_subnet = len(subnetwork_indices) def _init_H(self): self.H = torch.zeros(self.n_params_subnet, self.n_params_subnet, device=self._device) + def _check_subnetwork_indices(self, subnetwork_indices): + """Check that subnetwork indices are valid indices of the vectorized model parameters. + """ + if subnetwork_indices is None: + raise ValueError('Subnetwork indices cannot be None.') + elif not (isinstance(subnetwork_indices, torch.Tensor) and len(subnetwork_indices.shape) == 1 and\ + subnetwork_indices.dtype in [torch.int64, torch.int32, torch.int16, torch.int8, torch.uint8]): + raise ValueError('Subnetwork indices need to be 1-dimensional integral torch.Tensor!') + elif not (len(subnetwork_indices[subnetwork_indices < 0]) == 0 and\ + len(subnetwork_indices[subnetwork_indices >= self.n_params]) == 0): + raise ValueError(f'Subnetwork indices must lie between 0 and n_params={self.n_params}.') + elif not (subnetwork_indices.sort()[0].equal(torch.unique(subnetwork_indices, sorted=True))): + raise ValueError('Subnetwork indices must be unique.') + @property def prior_precision_diag(self): """Obtain the diagonal prior precision \\(p_0\\) constructed from either @@ -107,31 +115,13 @@ def prior_precision_diag(self): else: raise ValueError('Mismatch of prior and model. Diagonal or scalar prior.') - def fit(self, train_loader): - """Fit the local Laplace approximation at the parameters of the subnetwork. 
- - Parameters - ---------- - train_loader : torch.data.utils.DataLoader - each iterate is a training batch (X, y); - `train_loader.dataset` needs to be set to access \\(N\\), size of the data set - """ - - # select subnetwork and pass it to backend - self._subnetwork_mask.select(train_loader) - self.backend.subnetwork_indices = self._subnetwork_mask.indices - self.n_params_subnet = self._subnetwork_mask.n_params_subnet - - # fit Laplace approximation over subnetwork - super().fit(train_loader) - def sample(self, n_samples=100): # sample parameters just of the subnetwork - subnet_mean = self.mean[self._subnetwork_mask.indices] + subnet_mean = self.mean[self.backend.subnetwork_indices] dist = MultivariateNormal(loc=subnet_mean, scale_tril=self.posterior_scale) subnet_samples = dist.sample((n_samples,)) # set all other parameters to their MAP estimates full_samples = self.mean.repeat(n_samples, 1) - full_samples[:, self._subnetwork_mask.indices] = subnet_samples + full_samples[:, self.backend.subnetwork_indices] = subnet_samples return full_samples diff --git a/laplace/utils/subnetmask.py b/laplace/utils/subnetmask.py index a5cdc3d0..f1213d55 100644 --- a/laplace/utils/subnetmask.py +++ b/laplace/utils/subnetmask.py @@ -68,12 +68,12 @@ def convert_subnet_mask_to_indices(self, subnet_mask): subnet_mask_indices = subnet_mask.nonzero(as_tuple=True)[0] return subnet_mask_indices - def select(self, train_loader): + def select(self, train_loader=None): """ Select the subnetwork mask. Parameters ---------- - train_loader : torch.data.utils.DataLoader + train_loader : torch.data.utils.DataLoader, default=None each iterate is a training batch (X, y); `train_loader.dataset` needs to be set to access \\(N\\), size of the data set """ @@ -170,6 +170,9 @@ def __init__(self, model, n_params_subnet, diag_laplace_model): self.diag_laplace_model = diag_laplace_model def compute_param_scores(self, train_loader): + if train_loader is None: + raise ValueError('Need to pass train loader for subnet selection.') + self.diag_laplace_model.fit(train_loader) return self.diag_laplace_model.posterior_variance @@ -200,6 +203,9 @@ def __init__(self, model, n_params_subnet, likelihood='classification', swag_n_s self.swag_lr = swag_lr def compute_param_scores(self, train_loader): + if train_loader is None: + raise ValueError('Need to pass train loader for subnet selection.') + if self.likelihood == 'classification': criterion = CrossEntropyLoss(reduction='mean') elif self.likelihood == 'regression': @@ -315,6 +321,9 @@ def __init__(self, model, last_layer_name=None): def get_subnet_mask(self, train_loader): """ Get the subnetwork mask identifying the last layer.""" + if train_loader is None: + raise ValueError('Need to pass train loader for subnet selection.') + self._feature_extractor.eval() if self._feature_extractor.last_layer is None: X = next(iter(train_loader))[0] diff --git a/tests/test_subnetlaplace.py b/tests/test_subnetlaplace.py index 9007a7a5..1a024a75 100644 --- a/tests/test_subnetlaplace.py +++ b/tests/test_subnetlaplace.py @@ -7,6 +7,7 @@ from torch.utils.data import DataLoader, TensorDataset from laplace import Laplace, SubnetLaplace +from laplace.baselaplace import DiagLaplace from laplace.utils.subnetmask import SubnetMask, RandomSubnetMask, LargestMagnitudeSubnetMask, LargestVarianceDiagLaplaceSubnetMask, LargestVarianceSWAGSubnetMask, ParamNameSubnetMask, ModuleNameSubnetMask, LastLayerSubnetMask @@ -42,77 +43,86 @@ def reg_loader(): @pytest.mark.parametrize('likelihood', likelihoods) def 
test_subnet_laplace_init(model, likelihood): - # use last-layer subnet mask for this test - subnetwork_mask = LastLayerSubnetMask + # use random subnet mask for this test + subnetwork_mask = RandomSubnetMask + subnetmask_kwargs = dict(model=model, n_params_subnet=10) + subnetmask = subnetwork_mask(**subnetmask_kwargs) + subnetmask.select() # subnet Laplace with full Hessian should work hessian_structure = 'full' - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure=hessian_structure) + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetmask.indices, hessian_structure=hessian_structure) assert isinstance(lap, SubnetLaplace) # subnet Laplace with diag, kron or lowrank Hessians should raise errors hessian_structure = 'diag' with pytest.raises(ValueError): - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure=hessian_structure) + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetmask.indices, hessian_structure=hessian_structure) hessian_structure = 'kron' with pytest.raises(ValueError): - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure=hessian_structure) + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetmask.indices, hessian_structure=hessian_structure) hessian_structure = 'lowrank' with pytest.raises(ValueError): - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure=hessian_structure) + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetmask.indices, hessian_structure=hessian_structure) @pytest.mark.parametrize('subnetwork_mask,likelihood', product(score_based_subnet_masks, likelihoods)) def test_score_based_subnet_masks(model, likelihood, subnetwork_mask, class_loader, reg_loader): loader = class_loader if likelihood == 'classification' else reg_loader model_params = parameters_to_vector(model.parameters()) - subnetmask_kwargs = dict(likelihood=likelihood) if subnetwork_mask == LargestVarianceSWAGSubnetMask else dict() + + # set subnetwork mask arguments + if subnetwork_mask == LargestVarianceDiagLaplaceSubnetMask: + diag_laplace_model = DiagLaplace(model, likelihood) + subnetmask_kwargs = dict(model=model, diag_laplace_model=diag_laplace_model) + elif subnetwork_mask == LargestVarianceSWAGSubnetMask: + subnetmask_kwargs = dict(model=model, likelihood=likelihood) + else: + subnetmask_kwargs = dict(model=model) # should raise error if we don't pass number of subnet parameters within the subnetmask_kwargs with pytest.raises(TypeError): - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) + subnetmask = subnetwork_mask(**subnetmask_kwargs) + subnetmask.select(loader) # should raise error if we set number of subnet parameters to None subnetmask_kwargs.update(n_params_subnet=None) with pytest.raises(ValueError): - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) + subnetmask = subnetwork_mask(**subnetmask_kwargs) + subnetmask.select(loader) # should raise 
error if we set number of subnet parameters to be larger than number of model parameters subnetmask_kwargs.update(n_params_subnet=99999) with pytest.raises(ValueError): - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) + subnetmask = subnetwork_mask(**subnetmask_kwargs) + subnetmask.select(loader) - # define valid subnet Laplace model + # define subnetwork mask n_params_subnet = 32 subnetmask_kwargs.update(n_params_subnet=n_params_subnet) - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) - assert isinstance(lap, SubnetLaplace) - assert isinstance(lap._subnetwork_mask, subnetwork_mask) + subnetmask = subnetwork_mask(**subnetmask_kwargs) # should raise error if we try to access the subnet indices before the subnet has been selected with pytest.raises(AttributeError): - lap._subnetwork_mask.indices + subnetmask.indices # select subnet mask - lap._subnetwork_mask.select(loader) + subnetmask.select(loader) # should raise error if we try to select the subnet again with pytest.raises(ValueError): - lap._subnetwork_mask.select(loader) + subnetmask.select(loader) - # re-define valid subnet Laplace model - n_params_subnet = 32 - subnetmask_kwargs.update(n_params_subnet=n_params_subnet) - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) + # define valid subnet Laplace model + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetmask.indices, hessian_structure='full') assert isinstance(lap, SubnetLaplace) - assert isinstance(lap._subnetwork_mask, subnetwork_mask) - # fit Laplace model (which internally selects the subnet mask) + # fit Laplace model lap.fit(loader) # check some parameters - assert lap._subnetwork_mask.indices.equal(lap.backend.subnetwork_indices) - assert lap._subnetwork_mask.n_params_subnet == n_params_subnet + assert subnetmask.indices.equal(lap.backend.subnetwork_indices) + assert subnetmask.n_params_subnet == n_params_subnet assert lap.n_params_subnet == n_params_subnet assert parameters_to_vector(model.parameters()).equal(model_params) @@ -120,142 +130,152 @@ def test_score_based_subnet_masks(model, likelihood, subnetwork_mask, class_load assert lap.H.shape == (n_params_subnet, n_params_subnet) assert lap.prior_precision_diag.shape == (n_params_subnet,) - # should raise error if we try to fit the Laplace mdoel again - with pytest.raises(ValueError): - lap.fit(loader) - @pytest.mark.parametrize('subnetwork_mask,likelihood', product(layer_subnet_masks, likelihoods)) def test_layer_subnet_masks(model, likelihood, subnetwork_mask, class_loader, reg_loader): loader = class_loader if likelihood == 'classification' else reg_loader + subnetmask_kwargs = dict(model=model) # fit last-layer Laplace model lllap = Laplace(model, likelihood=likelihood, subset_of_weights='last_layer', hessian_structure='full') lllap.fit(loader) # should raise error if we pass number of subnet parameters - subnetmask_kwargs = dict(n_params_subnet=32) + subnetmask_kwargs.update(n_params_subnet=32) with pytest.raises(TypeError): - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) + 
subnetmask = subnetwork_mask(**subnetmask_kwargs) + subnetmask.select(loader) + subnetmask_kwargs = dict(model=model) if subnetwork_mask == ParamNameSubnetMask: # should raise error if we pass no parameter name list - subnetmask_kwargs = dict() + subnetmask_kwargs.update() with pytest.raises(TypeError): - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) + subnetmask = subnetwork_mask(**subnetmask_kwargs) + subnetmask.select(loader) # should raise error if we pass an empty parameter name list - subnetmask_kwargs = dict(parameter_names=[]) + subnetmask_kwargs.update(parameter_names=[]) with pytest.raises(ValueError): - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) - lap.fit(loader) + subnetmask = subnetwork_mask(**subnetmask_kwargs) + subnetmask.select(loader) # should raise error if we pass a parameter name list with invalid parameter names - subnetmask_kwargs = dict(parameter_names=['123']) + subnetmask_kwargs.update(parameter_names=['123']) with pytest.raises(ValueError): - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) - lap.fit(loader) + subnetmask = subnetwork_mask(**subnetmask_kwargs) + subnetmask.select(loader) # define last-layer Laplace model by parameter names and check that Hessian is identical to that of a full LLLaplace model - subnetmask_kwargs = dict(parameter_names=['1.weight', '1.bias']) - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) + subnetmask_kwargs.update(parameter_names=['1.weight', '1.bias']) + subnetmask = subnetwork_mask(**subnetmask_kwargs) + subnetmask.select(loader) + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetmask.indices, hessian_structure='full') lap.fit(loader) assert lllap.H.equal(lap.H) - # define valid parameter name subnet Laplace model - subnetmask_kwargs = dict(parameter_names=['0.weight', '1.bias']) - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) - n_params_subnet = 62 - assert isinstance(lap, SubnetLaplace) - assert isinstance(lap._subnetwork_mask, subnetwork_mask) + # define valid parameter name subnet mask + subnetmask_kwargs.update(parameter_names=['0.weight', '1.bias']) + subnetmask = subnetwork_mask(**subnetmask_kwargs) # should raise error if we access number of subnet parameters before selecting the subnet + n_params_subnet = 62 with pytest.raises(AttributeError): - n_params_subnet = lap._subnetwork_mask.n_params_subnet + n_params_subnet = subnetmask.n_params_subnet - # fit Laplace model + # select subnet mask and fit Laplace model + subnetmask.select(loader) + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetmask.indices, hessian_structure='full') lap.fit(loader) + assert isinstance(lap, SubnetLaplace) elif subnetwork_mask == ModuleNameSubnetMask: # should raise error if we pass no module name list - subnetmask_kwargs = dict() + subnetmask_kwargs.update() with pytest.raises(TypeError): - lap = Laplace(model, 
likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) + subnetmask = subnetwork_mask(**subnetmask_kwargs) + subnetmask.select(loader) # should raise error if we pass an empty module name list - subnetmask_kwargs = dict(module_names=[]) + subnetmask_kwargs.update(module_names=[]) with pytest.raises(ValueError): - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) - lap.fit(loader) + subnetmask = subnetwork_mask(**subnetmask_kwargs) + subnetmask.select(loader) # should raise error if we pass a module name list with invalid module names - subnetmask_kwargs = dict(module_names=['123']) + subnetmask_kwargs.update(module_names=['123']) with pytest.raises(ValueError): - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) - lap.fit(loader) + subnetmask = subnetwork_mask(**subnetmask_kwargs) + subnetmask.select(loader) # define last-layer Laplace model by module name and check that Hessian is identical to that of a full LLLaplace model - subnetmask_kwargs = dict(module_names=['1']) - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) + subnetmask_kwargs.update(module_names=['1']) + subnetmask = subnetwork_mask(**subnetmask_kwargs) + subnetmask.select(loader) + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetmask.indices, hessian_structure='full') lap.fit(loader) assert lllap.H.equal(lap.H) - # define valid parameter name subnet Laplace model - subnetmask_kwargs = dict(module_names=['0']) - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) - n_params_subnet = 80 - assert isinstance(lap, SubnetLaplace) - assert isinstance(lap._subnetwork_mask, subnetwork_mask) + # define valid parameter name subnet mask + subnetmask_kwargs.update(module_names=['0']) + subnetmask = subnetwork_mask(**subnetmask_kwargs) # should raise error if we access number of subnet parameters before selecting the subnet + n_params_subnet = 80 with pytest.raises(AttributeError): - n_params_subnet = lap._subnetwork_mask.n_params_subnet + n_params_subnet = subnetmask.n_params_subnet - # fit Laplace model + # select subnet mask and fit Laplace model + subnetmask.select(loader) + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetmask.indices, hessian_structure='full') lap.fit(loader) + assert isinstance(lap, SubnetLaplace) elif subnetwork_mask == LastLayerSubnetMask: # should raise error if we pass invalid last-layer name - subnetmask_kwargs = dict(last_layer_name='123') + subnetmask_kwargs.update(last_layer_name='123') with pytest.raises(KeyError): - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) + subnetmask = subnetwork_mask(**subnetmask_kwargs) + subnetmask.select(loader) - # define valid last-layer subnet Laplace model (without passing the last-layer name) - subnetmask_kwargs = dict() - lap = Laplace(model, likelihood=likelihood, 
subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) - assert isinstance(lap, SubnetLaplace) - assert isinstance(lap._subnetwork_mask, subnetwork_mask) + # define valid last-layer subnet mask (without passing the last-layer name) + subnetmask_kwargs = dict(model=model) + subnetmask = subnetwork_mask(**subnetmask_kwargs) # should raise error if we access number of subnet parameters before selecting the subnet with pytest.raises(AttributeError): - n_params_subnet = lap._subnetwork_mask.n_params_subnet + n_params_subnet = subnetmask.n_params_subnet - # fit Laplace model + # select subnet mask and fit Laplace model + subnetmask.select(loader) + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetmask.indices, hessian_structure='full') lap.fit(loader) + assert isinstance(lap, SubnetLaplace) # check that Hessian is identical to that of a full LLLaplace model assert lllap.H.equal(lap.H) - # define valid last-layer subnet Laplace model (with passing the last-layer name) - subnetmask_kwargs = dict(last_layer_name='1') - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) - n_params_subnet = 42 - assert isinstance(lap, SubnetLaplace) - assert isinstance(lap._subnetwork_mask, subnetwork_mask) + # define valid last-layer subnet mask (with passing the last-layer name) + subnetmask_kwargs.update(last_layer_name='1') + subnetmask = subnetwork_mask(**subnetmask_kwargs) # should raise error if we access number of subnet parameters before selecting the subnet + n_params_subnet = 42 with pytest.raises(AttributeError): - n_params_subnet = lap._subnetwork_mask.n_params_subnet + n_params_subnet = subnetmask.n_params_subnet - # fit Laplace model + # select subnet mask and fit Laplace model + subnetmask.select(loader) + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetmask.indices, hessian_structure='full') lap.fit(loader) + assert isinstance(lap, SubnetLaplace) # check that Hessian is identical to that of a full LLLaplace model assert lllap.H.equal(lap.H) # check some parameters - assert lap._subnetwork_mask.indices.equal(lap.backend.subnetwork_indices) - assert lap._subnetwork_mask.n_params_subnet == n_params_subnet + assert subnetmask.indices.equal(lap.backend.subnetwork_indices) + assert subnetmask.n_params_subnet == n_params_subnet assert lap.n_params_subnet == n_params_subnet # check that Hessian and prior precision is of correct shape @@ -274,14 +294,15 @@ def get_subnet_mask(self, train_loader): # define and fit valid full subnet Laplace model subnetwork_mask = FullSubnetMask - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full') + subnetmask = subnetwork_mask(model=model) + subnetmask.select(loader) + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetmask.indices, hessian_structure='full') lap.fit(loader) assert isinstance(lap, SubnetLaplace) - assert isinstance(lap._subnetwork_mask, subnetwork_mask) # check some parameters - assert lap._subnetwork_mask.indices.equal(torch.tensor(list(range(model.n_params)))) - assert lap._subnetwork_mask.n_params_subnet == model.n_params + assert subnetmask.indices.equal(torch.tensor(list(range(model.n_params)))) + assert subnetmask.n_params_subnet 
== model.n_params assert lap.n_params_subnet == model.n_params # check that the Hessian is identical to that of a all-weights FullLaplace model @@ -292,18 +313,23 @@ def get_subnet_mask(self, train_loader): @pytest.mark.parametrize('subnetwork_mask', all_subnet_masks) def test_regression_predictive(model, reg_loader, subnetwork_mask): + subnetmask_kwargs = dict(model=model) if subnetwork_mask in score_based_subnet_masks: - subnetmask_kwargs = dict(n_params_subnet=32) + subnetmask_kwargs.update(n_params_subnet=32) + if subnetwork_mask == LargestVarianceSWAGSubnetMask: + subnetmask_kwargs.update(likelihood='regression') + elif subnetwork_mask == LargestVarianceDiagLaplaceSubnetMask: + diag_laplace_model = DiagLaplace(model, 'regression') + subnetmask_kwargs.update(diag_laplace_model=diag_laplace_model) elif subnetwork_mask == ParamNameSubnetMask: - subnetmask_kwargs = dict(parameter_names=['0.weight', '1.bias']) + subnetmask_kwargs.update(parameter_names=['0.weight', '1.bias']) elif subnetwork_mask == ModuleNameSubnetMask: - subnetmask_kwargs = dict(module_names=['0']) - else: - subnetmask_kwargs = dict() - subnetmask_kwargs.update(dict(likelihood='regression') if subnetwork_mask == LargestVarianceSWAGSubnetMask else dict()) - lap = Laplace(model, likelihood='regression', subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) + subnetmask_kwargs.update(module_names=['0']) + + subnetmask = subnetwork_mask(**subnetmask_kwargs) + subnetmask.select(reg_loader) + lap = Laplace(model, likelihood='regression', subset_of_weights='subnetwork', subnetwork_indices=subnetmask.indices, hessian_structure='full') assert isinstance(lap, SubnetLaplace) - assert isinstance(lap._subnetwork_mask, subnetwork_mask) lap.fit(reg_loader) X, _ = reg_loader.dataset.tensors @@ -328,18 +354,23 @@ def test_regression_predictive(model, reg_loader, subnetwork_mask): @pytest.mark.parametrize('subnetwork_mask', all_subnet_masks) def test_classification_predictive(model, class_loader, subnetwork_mask): + subnetmask_kwargs = dict(model=model) if subnetwork_mask in score_based_subnet_masks: - subnetmask_kwargs = dict(n_params_subnet=32) + subnetmask_kwargs.update(n_params_subnet=32) + if subnetwork_mask == LargestVarianceSWAGSubnetMask: + subnetmask_kwargs.update(likelihood='classification') + elif subnetwork_mask == LargestVarianceDiagLaplaceSubnetMask: + diag_laplace_model = DiagLaplace(model, 'classification') + subnetmask_kwargs.update(diag_laplace_model=diag_laplace_model) elif subnetwork_mask == ParamNameSubnetMask: - subnetmask_kwargs = dict(parameter_names=['0.weight', '1.bias']) + subnetmask_kwargs.update(parameter_names=['0.weight', '1.bias']) elif subnetwork_mask == ModuleNameSubnetMask: - subnetmask_kwargs = dict(module_names=['0']) - else: - subnetmask_kwargs = dict() - subnetmask_kwargs.update(dict(likelihood='classification') if subnetwork_mask == LargestVarianceSWAGSubnetMask else dict()) - lap = Laplace(model, likelihood='classification', subset_of_weights='subnetwork', subnetwork_mask=subnetwork_mask, hessian_structure='full', subnetmask_kwargs=subnetmask_kwargs) + subnetmask_kwargs.update(module_names=['0']) + + subnetmask = subnetwork_mask(**subnetmask_kwargs) + subnetmask.select(class_loader) + lap = Laplace(model, likelihood='classification', subset_of_weights='subnetwork', subnetwork_indices=subnetmask.indices, hessian_structure='full') assert isinstance(lap, SubnetLaplace) - assert isinstance(lap._subnetwork_mask, subnetwork_mask) 
lap.fit(class_loader) X, _ = class_loader.dataset.tensors From 6de38c9821247a0ebf2a60e16c78c471b7113fb7 Mon Sep 17 00:00:00 2001 From: "Erik A. Daxberger" Date: Mon, 3 Jan 2022 18:13:18 +0000 Subject: [PATCH 32/49] Change subnetwork indices validity check to only allow long tensors --- laplace/subnetlaplace.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/laplace/subnetlaplace.py b/laplace/subnetlaplace.py index 25c5ae96..f624a29b 100644 --- a/laplace/subnetlaplace.py +++ b/laplace/subnetlaplace.py @@ -88,9 +88,9 @@ def _check_subnetwork_indices(self, subnetwork_indices): """ if subnetwork_indices is None: raise ValueError('Subnetwork indices cannot be None.') - elif not (isinstance(subnetwork_indices, torch.Tensor) and len(subnetwork_indices.shape) == 1 and\ - subnetwork_indices.dtype in [torch.int64, torch.int32, torch.int16, torch.int8, torch.uint8]): - raise ValueError('Subnetwork indices need to be 1-dimensional integral torch.Tensor!') + elif not (isinstance(subnetwork_indices, torch.Tensor) and subnetwork_indices.numel() > 0\ + and len(subnetwork_indices.shape) == 1 and subnetwork_indices.dtype == torch.int64): + raise ValueError('Subnetwork indices must be non-empty, 1-dimensional torch.LongTensor.') elif not (len(subnetwork_indices[subnetwork_indices < 0]) == 0 and\ len(subnetwork_indices[subnetwork_indices >= self.n_params]) == 0): raise ValueError(f'Subnetwork indices must lie between 0 and n_params={self.n_params}.') From 7058b399469ec4dbfd06d2f51c7fb097c2da5ce1 Mon Sep 17 00:00:00 2001 From: "Erik A. Daxberger" Date: Mon, 3 Jan 2022 18:13:58 +0000 Subject: [PATCH 33/49] Add test for SubnetLaplace with custom subnetwork indices specification --- tests/test_subnetlaplace.py | 84 +++++++++++++++++++++++++++++++++++++ 1 file changed, 84 insertions(+) diff --git a/tests/test_subnetlaplace.py b/tests/test_subnetlaplace.py index 1a024a75..011fc0f9 100644 --- a/tests/test_subnetlaplace.py +++ b/tests/test_subnetlaplace.py @@ -66,6 +66,90 @@ def test_subnet_laplace_init(model, likelihood): lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetmask.indices, hessian_structure=hessian_structure) +@pytest.mark.parametrize('likelihood', likelihoods) +def test_custom_subnetwork_indices(model, likelihood, class_loader, reg_loader): + loader = class_loader if likelihood == 'classification' else reg_loader + + # subnetwork indices that are None should raise an error + subnetwork_indices = None + with pytest.raises(ValueError): + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetwork_indices, hessian_structure='full') + + # subnetwork indices that are not PyTorch tensors should raise an error + subnetwork_indices = [0, 5, 11, 42] + with pytest.raises(ValueError): + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetwork_indices, hessian_structure='full') + + # subnetwork indices that are empty tensors should raise an error + subnetwork_indices = torch.LongTensor([]) + with pytest.raises(ValueError): + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetwork_indices, hessian_structure='full') + + # subnetwork indices that are not 1D PyTorch tensors should raise an error + subnetwork_indices = torch.LongTensor([[0, 5], [11, 42]]) + with pytest.raises(ValueError): + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetwork_indices, 
hessian_structure='full') + + # subnetwork indices that are double tensors should raise an error + subnetwork_indices = torch.DoubleTensor([0.0, 5.0, 11.0, 42.0]) + with pytest.raises(ValueError): + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetwork_indices, hessian_structure='full') + + # subnetwork indices that are float tensors should raise an error + subnetwork_indices = torch.FloatTensor([0.0, 5.0, 11.0, 42.0]) + with pytest.raises(ValueError): + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetwork_indices, hessian_structure='full') + + # subnetwork indices that are half tensors should raise an error + subnetwork_indices = torch.HalfTensor([0.0, 5.0, 11.0, 42.0]) + with pytest.raises(ValueError): + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetwork_indices, hessian_structure='full') + + # subnetwork indices that are int tensors should raise an error + subnetwork_indices = torch.IntTensor([0, 5, 11, 42]) + with pytest.raises(ValueError): + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetwork_indices, hessian_structure='full') + + # subnetwork indices that are short tensors should raise an error + subnetwork_indices = torch.ShortTensor([0, 5, 11, 42]) + with pytest.raises(ValueError): + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetwork_indices, hessian_structure='full') + + # subnetwork indices that are char tensors should raise an error + subnetwork_indices = torch.CharTensor([0, 5, 11, 42]) + with pytest.raises(ValueError): + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetwork_indices, hessian_structure='full') + + # subnetwork indices that are bool tensors should raise an error + subnetwork_indices = torch.BoolTensor([0, 5, 11, 42]) + with pytest.raises(ValueError): + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetwork_indices, hessian_structure='full') + + # subnetwork indices that contain elements smaller than zero should raise an error + subnetwork_indices = torch.LongTensor([0, -1, -11]) + with pytest.raises(ValueError): + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetwork_indices, hessian_structure='full') + + # subnetwork indices that contain elements larger than n_params should raise an error + subnetwork_indices = torch.LongTensor([model.n_params + 1, model.n_params + 42]) + with pytest.raises(ValueError): + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetwork_indices, hessian_structure='full') + + # subnetwork indices that contain duplicate entries should raise an error + subnetwork_indices = torch.LongTensor([0, 0, 5, 11, 11, 42]) + with pytest.raises(ValueError): + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetwork_indices, hessian_structure='full') + + # Non-empty, 1-dimensional torch.LongTensor with valid entries should work + subnetwork_indices = torch.LongTensor([0, 5, 11, 42]) + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetwork_indices, hessian_structure='full') + lap.fit(loader) + assert isinstance(lap, SubnetLaplace) + assert lap.n_params_subnet == 4 + assert lap.H.shape == (4, 
4) + assert lap.backend.subnetwork_indices.equal(subnetwork_indices) + + @pytest.mark.parametrize('subnetwork_mask,likelihood', product(score_based_subnet_masks, likelihoods)) def test_score_based_subnet_masks(model, likelihood, subnetwork_mask, class_loader, reg_loader): loader = class_loader if likelihood == 'classification' else reg_loader From e66ba5147720c1f88c4777527808739516980e7e Mon Sep 17 00:00:00 2001 From: "Erik A. Daxberger" Date: Mon, 3 Jan 2022 18:24:58 +0000 Subject: [PATCH 34/49] Remove None default value for subnetwork indices in SubnetLaplace --- laplace/subnetlaplace.py | 4 ++-- tests/test_subnetlaplace.py | 4 ++++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/laplace/subnetlaplace.py b/laplace/subnetlaplace.py index f624a29b..010215e5 100644 --- a/laplace/subnetlaplace.py +++ b/laplace/subnetlaplace.py @@ -47,7 +47,7 @@ class SubnetLaplace(FullLaplace): model : torch.nn.Module or `laplace.utils.feature_extractor.FeatureExtractor` likelihood : {'classification', 'regression'} determines the log likelihood Hessian approximation - subnetwork_indices : torch.Tensor, default=None + subnetwork_indices : torch.LongTensor indices of the vectorized model parameters that define the subnetwork to apply the Laplace approximation over sigma_noise : torch.Tensor or float, default=1 @@ -69,7 +69,7 @@ class SubnetLaplace(FullLaplace): # key to map to correct subclass of BaseLaplace, (subset of weights, Hessian structure) _key = ('subnetwork', 'full') - def __init__(self, model, likelihood, subnetwork_indices=None, sigma_noise=1., prior_precision=1., + def __init__(self, model, likelihood, subnetwork_indices, sigma_noise=1., prior_precision=1., prior_mean=0., temperature=1., backend=BackPackGGN, backend_kwargs=None): self.H = None super().__init__(model, likelihood, sigma_noise=sigma_noise, prior_precision=prior_precision, diff --git a/tests/test_subnetlaplace.py b/tests/test_subnetlaplace.py index 011fc0f9..b472b6aa 100644 --- a/tests/test_subnetlaplace.py +++ b/tests/test_subnetlaplace.py @@ -54,6 +54,10 @@ def test_subnet_laplace_init(model, likelihood): lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetmask.indices, hessian_structure=hessian_structure) assert isinstance(lap, SubnetLaplace) + # subnet Laplace without specifying subnetwork indices should raise an error + with pytest.raises(TypeError): + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', hessian_structure=hessian_structure) + # subnet Laplace with diag, kron or lowrank Hessians should raise errors hessian_structure = 'diag' with pytest.raises(ValueError): From 589b8462cf074b7df80abe72e1f853058bbc56cf Mon Sep 17 00:00:00 2001 From: "Erik A. 
Daxberger" Date: Tue, 4 Jan 2022 09:55:18 +0000 Subject: [PATCH 35/49] Add failing test case for scalar subnetwork indices --- tests/test_subnetlaplace.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/test_subnetlaplace.py b/tests/test_subnetlaplace.py index b472b6aa..1e98622c 100644 --- a/tests/test_subnetlaplace.py +++ b/tests/test_subnetlaplace.py @@ -89,6 +89,11 @@ def test_custom_subnetwork_indices(model, likelihood, class_loader, reg_loader): with pytest.raises(ValueError): lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetwork_indices, hessian_structure='full') + # subnetwork indices that are scalar tensors should raise an error + subnetwork_indices = torch.LongTensor(11) + with pytest.raises(ValueError): + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetwork_indices, hessian_structure='full') + # subnetwork indices that are not 1D PyTorch tensors should raise an error subnetwork_indices = torch.LongTensor([[0, 5], [11, 42]]) with pytest.raises(ValueError): From be245c8f39a62879259eb5fddcccca65196203ec Mon Sep 17 00:00:00 2001 From: "Erik A. Daxberger" Date: Tue, 4 Jan 2022 09:56:02 +0000 Subject: [PATCH 36/49] Change SubnetMask.select() to return subnet indices and improve documentation --- laplace/utils/subnetmask.py | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/laplace/utils/subnetmask.py b/laplace/utils/subnetmask.py index f1213d55..9a74e705 100644 --- a/laplace/utils/subnetmask.py +++ b/laplace/utils/subnetmask.py @@ -49,11 +49,14 @@ def convert_subnet_mask_to_indices(self, subnet_mask): subnet_mask : torch.Tensor a binary vector of size (n_params) where 1s locate the subnetwork parameters within the vectorized model parameters + (i.e. `torch.nn.utils.parameters_to_vector(model.parameters())`) Returns ------- - subnet_mask_indices : torch.Tensor - a vector of indices of the vectorized model parameters that define the subnetwork + subnet_mask_indices : torch.LongTensor + a vector of indices of the vectorized model parameters + (i.e. `torch.nn.utils.parameters_to_vector(model.parameters())`) + that define the subnetwork """ if not isinstance(subnet_mask, torch.Tensor): raise ValueError('Subnetwork mask needs to be torch.Tensor!') @@ -63,7 +66,8 @@ def convert_subnet_mask_to_indices(self, subnet_mask): elif len(subnet_mask) != self._n_params or\ len(subnet_mask[subnet_mask == 0]) + len(subnet_mask[subnet_mask == 1]) != self._n_params: raise ValueError('Subnetwork mask needs to be a binary vector of size (n_params) where 1s'\ - 'locate the subnetwork parameters within the vectorized model parameters!') + 'locate the subnetwork parameters within the vectorized model parameters'\ + '(i.e. `torch.nn.utils.parameters_to_vector(model.parameters())`)!') subnet_mask_indices = subnet_mask.nonzero(as_tuple=True)[0] return subnet_mask_indices @@ -76,12 +80,20 @@ def select(self, train_loader=None): train_loader : torch.data.utils.DataLoader, default=None each iterate is a training batch (X, y); `train_loader.dataset` needs to be set to access \\(N\\), size of the data set + + Returns + ------- + subnet_mask_indices : torch.LongTensor + a vector of indices of the vectorized model parameters + (i.e. 
`torch.nn.utils.parameters_to_vector(model.parameters())`) + that define the subnetwork """ if self._indices is not None: raise ValueError('Subnetwork mask already selected.') subnet_mask = self.get_subnet_mask(train_loader) self._indices = self.convert_subnet_mask_to_indices(subnet_mask) + return self._indices def get_subnet_mask(self, train_loader): """ Get the subnetwork mask. @@ -97,6 +109,7 @@ def get_subnet_mask(self, train_loader): subnet_mask: torch.Tensor a binary vector of size (n_params) where 1s locate the subnetwork parameters within the vectorized model parameters + (i.e. `torch.nn.utils.parameters_to_vector(model.parameters())`) """ raise NotImplementedError From 6ae4f9f63560db08ac7263ccc5d5db0ac9fd4bf8 Mon Sep 17 00:00:00 2001 From: "Erik A. Daxberger" Date: Tue, 4 Jan 2022 09:56:34 +0000 Subject: [PATCH 37/49] Minor refactorings (subnet indices checks and documentation) --- laplace/subnetlaplace.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/laplace/subnetlaplace.py b/laplace/subnetlaplace.py index 010215e5..6b04cf10 100644 --- a/laplace/subnetlaplace.py +++ b/laplace/subnetlaplace.py @@ -48,8 +48,9 @@ class SubnetLaplace(FullLaplace): likelihood : {'classification', 'regression'} determines the log likelihood Hessian approximation subnetwork_indices : torch.LongTensor - indices of the vectorized model parameters that define the subnetwork - to apply the Laplace approximation over + indices of the vectorized model parameters + (i.e. `torch.nn.utils.parameters_to_vector(model.parameters())`) + that define the subnetwork to apply the Laplace approximation over sigma_noise : torch.Tensor or float, default=1 observation noise for the regression setting; must be 1 for classification prior_precision : torch.Tensor or float, default=1 @@ -84,18 +85,19 @@ def _init_H(self): self.H = torch.zeros(self.n_params_subnet, self.n_params_subnet, device=self._device) def _check_subnetwork_indices(self, subnetwork_indices): - """Check that subnetwork indices are valid indices of the vectorized model parameters. + """Check that subnetwork indices are valid indices of the vectorized model parameters + (i.e. `torch.nn.utils.parameters_to_vector(model.parameters())`). """ if subnetwork_indices is None: raise ValueError('Subnetwork indices cannot be None.') - elif not (isinstance(subnetwork_indices, torch.Tensor) and subnetwork_indices.numel() > 0\ - and len(subnetwork_indices.shape) == 1 and subnetwork_indices.dtype == torch.int64): + elif not (isinstance(subnetwork_indices, torch.LongTensor) and\ + subnetwork_indices.numel() > 0 and len(subnetwork_indices.shape) == 1): raise ValueError('Subnetwork indices must be non-empty, 1-dimensional torch.LongTensor.') elif not (len(subnetwork_indices[subnetwork_indices < 0]) == 0 and\ len(subnetwork_indices[subnetwork_indices >= self.n_params]) == 0): raise ValueError(f'Subnetwork indices must lie between 0 and n_params={self.n_params}.') elif not (subnetwork_indices.sort()[0].equal(torch.unique(subnetwork_indices, sorted=True))): - raise ValueError('Subnetwork indices must be unique.') + raise ValueError('Subnetwork indices must not contain duplicate entries.') @property def prior_precision_diag(self): From 288f7678aea11c3f97b7ffa21dded511841c1a1e Mon Sep 17 00:00:00 2001 From: "Erik A. 
Daxberger" Date: Tue, 4 Jan 2022 09:57:15 +0000 Subject: [PATCH 38/49] Add README example for SubnetLaplace --- README.md | 47 +++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 43 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index fbe90ee0..b7543727 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ pytest tests/ ## Structure The laplace package consists of two main components: -1. The subclasses of [`laplace.BaseLaplace`](https://github.com/AlexImmer/Laplace/blob/main/laplace/baselaplace.py) that implement different sparsity structures: different subsets of weights (`'all'`, `'subnetwork'` and `'last_layer'`) and different structures of the Hessian approximation (`'full'`, `'kron'`, `'lowrank'` and `'diag'`). This results in _eight_ currently available options: `laplace.FullLaplace`, `laplace.KronLaplace`, `laplace.DiagLaplace`, the corresponding last-layer variations `laplace.FullLLLaplace`, `laplace.KronLLLaplace`, and `laplace.DiagLLLaplace` (which are all subclasses of [`laplace.LLLaplace`](https://github.com/AlexImmer/Laplace/blob/main/laplace/lllaplace.py)), `laplace.SubnetLaplace` (which only supports a `'full'` Hessian approximation) and `laplace.LowRankLaplace` (which only supports inference over `'all'` weights). All of these can be conveniently accessed via the [`laplace.Laplace`](https://github.com/AlexImmer/Laplace/blob/main/laplace/laplace.py) function. +1. The subclasses of [`laplace.BaseLaplace`](https://github.com/AlexImmer/Laplace/blob/main/laplace/baselaplace.py) that implement different sparsity structures: different subsets of weights (`'all'`, `'subnetwork'` and `'last_layer'`) and different structures of the Hessian approximation (`'full'`, `'kron'`, `'lowrank'` and `'diag'`). This results in _eight_ currently available options: `laplace.FullLaplace`, `laplace.KronLaplace`, `laplace.DiagLaplace`, the corresponding last-layer variations `laplace.FullLLLaplace`, `laplace.KronLLLaplace`, and `laplace.DiagLLLaplace` (which are all subclasses of [`laplace.LLLaplace`](https://github.com/AlexImmer/Laplace/blob/main/laplace/lllaplace.py)), [`laplace.SubnetLaplace`](https://github.com/AlexImmer/Laplace/blob/main/laplace/subnetlaplace.py) (which only supports a `'full'` Hessian approximation) and `laplace.LowRankLaplace` (which only supports inference over `'all'` weights). All of these can be conveniently accessed via the [`laplace.Laplace`](https://github.com/AlexImmer/Laplace/blob/main/laplace/laplace.py) function. 2. The backends in [`laplace.curvature`](https://github.com/AlexImmer/Laplace/blob/main/laplace/curvature/) which provide access to Hessian approximations of the corresponding sparsity structures, for example, the diagonal GGN. @@ -48,7 +48,7 @@ decomposing a neural network into feature extractor and last layer for `LLLaplac and effectively dealing with Kronecker factors ([`laplace.utils.matrix`](https://github.com/AlexImmer/Laplace/blob/main/laplace/utils/matrix.py)). -Finally, the package implements several options to select/specify a subnetwork for `laplace.SubnetLaplace` (as subclasses of [`laplace.utils.subnetmask.SubnetMask`](https://github.com/AlexImmer/Laplace/blob/main/laplace/utils/subnetmask.py). +Finally, the package implements several options to select/specify a subnetwork for `SubnetLaplace` (as subclasses of [`laplace.utils.subnetmask.SubnetMask`](https://github.com/AlexImmer/Laplace/blob/main/laplace/utils/subnetmask.py)). 
Automatic subnetwork selection strategies include: uniformly at random (`laplace.utils.subnetmask.RandomSubnetMask`), by largest parameter magnitudes (`LargestMagnitudeSubnetMask`), and by largest marginal parameter variances (`LargestVarianceDiagLaplaceSubnetMask` and `LargestVarianceSWAGSubnetMask`). In addition to that, subnetworks can also be specified manually, by listing the names of either the model parameters (`ParamNameSubnetMask`) or modules (`ModuleNameSubnetMask`) to perform Laplace inference over. @@ -78,7 +78,7 @@ the `'probit'` predictive for classification. ```python from laplace import Laplace -# pre-trained model +# Pre-trained model model = load_map_model() # User-specified LA flavor @@ -94,7 +94,7 @@ pred = la(x, link_approx='probit') ### Differentiating the log marginal likelihood w.r.t. hyperparameters -The marginal likelihood can be used for model selection and is differentiable +The marginal likelihood can be used for model selection [10] and is differentiable for continuous hyperparameters like the prior precision or observation noise. Here, we fit the library default, KFAC last-layer LA and differentiate the log marginal likelihood. @@ -114,6 +114,45 @@ ml = la.log_marginal_likelihood(prior_prec, obs_noise) ml.backward() ``` +### Applying the LA over only a subset of the model parameters + +This example shows how to fit the Laplace approximation over only +a subnetwork within a neural network (while keeping all other parameters +fixed at their MAP estimates), as proposed in [11]. It also exemplifies +different ways to specify the subnetwork to perform inference over. + +```python +from laplace import Laplace + +# Pre-trained model +model = load_model() + +# Examples of different ways to specify the subnetwork +# via indices of the vectorized model parameters +# +# Example 1: select the 128 parameters with the largest magnitude +from laplace.utils.subnetmask import LargestMagnitudeSubnetMask +subnetwork_mask = LargestMagnitudeSubnetMask(model, n_params_subnet=128) +subnetwork_indices = subnetwork_mask.select() + +# Example 2: specify the layers that define the subnetwork +from laplace.utils.subnetmask import ModuleNameSubnetMask +subnetwork_mask = ModuleNameSubnetMask(model, module_names=['layer.1', 'layer.3']) +subnetwork_mask.select() +subnetwork_indices = subnetwork_mask.indices + +# Example 3: manually define the subnetwork via custom subnetwork indices +import torch +subnetwork_indices = torch.tensor([0, 4, 11, 42, 123, 2021]) + +# Define and fit subnetwork LA using the specified subnetwork indices +la = Laplace(model, 'classification', + subset_of_weights='subnetwork', + hessian_structure='full', + subnetwork_indices=subnetwork_indices) +la.fit(train_loader) +``` + ## Documentation The documentation is available [here](https://aleximmer.github.io/Laplace) or can be generated and/or viewed locally: From b5d8adfe34d55e96152eb832a9da8082b50a9034 Mon Sep 17 00:00:00 2001 From: "Erik A. 
Daxberger" Date: Wed, 12 Jan 2022 09:24:04 +0000 Subject: [PATCH 39/49] Add __all__ for utils.py --- laplace/utils/utils.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/laplace/utils/utils.py b/laplace/utils/utils.py index 5b059d31..a00dc2f4 100644 --- a/laplace/utils/utils.py +++ b/laplace/utils/utils.py @@ -8,6 +8,10 @@ from torch.distributions.multivariate_normal import _precision_to_scale_tril +__all__ = ['get_nll', 'validate', 'parameters_per_layer', 'invsqrt_precision', 'kron', + 'diagonal_add_scalar', 'symeig', 'block_diag', 'expand_prior_precision'] + + def get_nll(out_dist, targets): return F.nll_loss(torch.log(out_dist), targets) From 557aa05e98484da75d0cbe114799f0055fd02c12 Mon Sep 17 00:00:00 2001 From: "Erik A. Daxberger" Date: Wed, 12 Jan 2022 09:24:20 +0000 Subject: [PATCH 40/49] Add __all__ for swag.py --- laplace/utils/swag.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/laplace/utils/swag.py b/laplace/utils/swag.py index d780fe16..8e6fb514 100644 --- a/laplace/utils/swag.py +++ b/laplace/utils/swag.py @@ -4,7 +4,10 @@ from torch.nn.utils import parameters_to_vector -def param_vector(model): +__all__ = ['fit_diagonal_swag'] + + +def _param_vector(model): return parameters_to_vector(model.parameters()).detach() @@ -55,8 +58,8 @@ def fit_diagonal_swag(model, train_loader, criterion, n_snapshots_total=40, snap device = next(_model.parameters()).device # initialize running estimates of first and second moment of model parameters - mean = torch.zeros_like(param_vector(_model)) - sq_mean = torch.zeros_like(param_vector(_model)) + mean = torch.zeros_like(_param_vector(_model)) + sq_mean = torch.zeros_like(_param_vector(_model)) n_snapshots = 0 # run SGD to collect model snapshots @@ -72,8 +75,8 @@ def fit_diagonal_swag(model, train_loader, criterion, n_snapshots_total=40, snap if epoch % snapshot_freq == 0: # update running estimates of first and second moment of model parameters - mean = mean * n_snapshots / (n_snapshots + 1) + param_vector(_model) / (n_snapshots + 1) - sq_mean = sq_mean * n_snapshots / (n_snapshots + 1) + param_vector(_model) ** 2 / (n_snapshots + 1) + mean = mean * n_snapshots / (n_snapshots + 1) + _param_vector(_model) / (n_snapshots + 1) + sq_mean = sq_mean * n_snapshots / (n_snapshots + 1) + _param_vector(_model) ** 2 / (n_snapshots + 1) n_snapshots += 1 # compute marginal parameter variances, Var[P] = E[P^2] - E[P]^2 From 4ab30df9c7668a024a8be593a1a781e3a9bd912a Mon Sep 17 00:00:00 2001 From: "Erik A. 
Daxberger" Date: Wed, 12 Jan 2022 09:25:57 +0000 Subject: [PATCH 41/49] Add __init__.py to utils/ to simplify utility imports --- README.md | 4 ++-- laplace/baselaplace.py | 3 +-- laplace/curvature/asdl.py | 3 +-- laplace/curvature/backpack.py | 2 +- laplace/lllaplace.py | 4 +--- laplace/marglik_training.py | 2 +- laplace/utils/__init__.py | 14 ++++++++++++++ laplace/utils/matrix.py | 2 +- laplace/utils/subnetmask.py | 7 ++++--- tests/test_baselaplace.py | 2 +- tests/test_feature_extractor.py | 2 +- tests/test_jacobians.py | 2 +- tests/test_lllaplace.py | 2 +- tests/test_matrix.py | 5 ++--- tests/test_subnetlaplace.py | 4 ++-- tests/test_utils.py | 2 +- 16 files changed, 35 insertions(+), 25 deletions(-) create mode 100644 laplace/utils/__init__.py diff --git a/README.md b/README.md index b7543727..d3590abd 100644 --- a/README.md +++ b/README.md @@ -131,12 +131,12 @@ model = load_model() # via indices of the vectorized model parameters # # Example 1: select the 128 parameters with the largest magnitude -from laplace.utils.subnetmask import LargestMagnitudeSubnetMask +from laplace.utils import LargestMagnitudeSubnetMask subnetwork_mask = LargestMagnitudeSubnetMask(model, n_params_subnet=128) subnetwork_indices = subnetwork_mask.select() # Example 2: specify the layers that define the subnetwork -from laplace.utils.subnetmask import ModuleNameSubnetMask +from laplace.utils import ModuleNameSubnetMask subnetwork_mask = ModuleNameSubnetMask(model, module_names=['layer.1', 'layer.3']) subnetwork_mask.select() subnetwork_indices = subnetwork_mask.indices diff --git a/laplace/baselaplace.py b/laplace/baselaplace.py index 2c382d98..09c05ca3 100644 --- a/laplace/baselaplace.py +++ b/laplace/baselaplace.py @@ -4,8 +4,7 @@ from torch.nn.utils import parameters_to_vector, vector_to_parameters from torch.distributions import MultivariateNormal, Dirichlet, Normal -from laplace.utils.utils import parameters_per_layer, invsqrt_precision, get_nll, validate -from laplace.utils.matrix import Kron +from laplace.utils import parameters_per_layer, invsqrt_precision, get_nll, validate, Kron from laplace.curvature import BackPackGGN, AsdlHessian diff --git a/laplace/curvature/asdl.py b/laplace/curvature/asdl.py index a3ccd3d6..dac769fe 100644 --- a/laplace/curvature/asdl.py +++ b/laplace/curvature/asdl.py @@ -9,8 +9,7 @@ from asdfghjkl.gradient import batch_gradient from laplace.curvature import CurvatureInterface, GGNInterface, EFInterface -from laplace.utils.matrix import Kron -from laplace.utils.utils import _is_batchnorm +from laplace.utils import Kron, _is_batchnorm EPS = 1e-6 diff --git a/laplace/curvature/backpack.py b/laplace/curvature/backpack.py index 5c78a093..8cffc154 100644 --- a/laplace/curvature/backpack.py +++ b/laplace/curvature/backpack.py @@ -5,7 +5,7 @@ from backpack.context import CTX from laplace.curvature import CurvatureInterface, GGNInterface, EFInterface -from laplace.utils.matrix import Kron +from laplace.utils import Kron class BackPackInterface(CurvatureInterface): diff --git a/laplace/lllaplace.py b/laplace/lllaplace.py index 8f93ff53..73c552df 100644 --- a/laplace/lllaplace.py +++ b/laplace/lllaplace.py @@ -3,9 +3,7 @@ from torch.nn.utils import parameters_to_vector, vector_to_parameters from laplace.baselaplace import ParametricLaplace, FullLaplace, KronLaplace, DiagLaplace -from laplace.utils.feature_extractor import FeatureExtractor - -from laplace.utils.matrix import Kron +from laplace.utils import FeatureExtractor, Kron from laplace.curvature import BackPackGGN diff --git 
a/laplace/marglik_training.py b/laplace/marglik_training.py index 3a2b2161..ec100542 100644 --- a/laplace/marglik_training.py +++ b/laplace/marglik_training.py @@ -9,7 +9,7 @@ from laplace import Laplace from laplace.curvature import AsdlGGN -from laplace.utils.utils import expand_prior_precision +from laplace.utils import expand_prior_precision def marglik_training( diff --git a/laplace/utils/__init__.py b/laplace/utils/__init__.py new file mode 100644 index 00000000..cc05d4d9 --- /dev/null +++ b/laplace/utils/__init__.py @@ -0,0 +1,14 @@ +from laplace.utils.utils import get_nll, validate, parameters_per_layer, invsqrt_precision, _is_batchnorm, _is_valid_scalar, kron, diagonal_add_scalar, symeig, block_diag, expand_prior_precision +from laplace.utils.feature_extractor import FeatureExtractor +from laplace.utils.matrix import Kron, KronDecomposed +from laplace.utils.swag import fit_diagonal_swag +from laplace.utils.subnetmask import SubnetMask, RandomSubnetMask, LargestMagnitudeSubnetMask, LargestVarianceDiagLaplaceSubnetMask, LargestVarianceSWAGSubnetMask, ParamNameSubnetMask, ModuleNameSubnetMask, LastLayerSubnetMask + + +__all__ = ['get_nll', 'validate', 'parameters_per_layer', 'invsqrt_precision', 'kron', + 'diagonal_add_scalar', 'symeig', 'block_diag', 'expand_prior_precision', + 'FeatureExtractor', + 'Kron', 'KronDecomposed', + 'fit_diagonal_swag', + 'SubnetMask', 'RandomSubnetMask', 'LargestMagnitudeSubnetMask', 'LargestVarianceDiagLaplaceSubnetMask', + 'LargestVarianceSWAGSubnetMask', 'ParamNameSubnetMask', 'ModuleNameSubnetMask', 'LastLayerSubnetMask'] diff --git a/laplace/utils/matrix.py b/laplace/utils/matrix.py index 30b0245d..61c07ab5 100644 --- a/laplace/utils/matrix.py +++ b/laplace/utils/matrix.py @@ -3,7 +3,7 @@ import numpy as np from typing import Union -from laplace.utils.utils import _is_valid_scalar, symeig, kron, block_diag +from laplace.utils import _is_valid_scalar, symeig, kron, block_diag class Kron: diff --git a/laplace/utils/subnetmask.py b/laplace/utils/subnetmask.py index 9a74e705..9609401a 100644 --- a/laplace/utils/subnetmask.py +++ b/laplace/utils/subnetmask.py @@ -4,10 +4,11 @@ from torch.nn import CrossEntropyLoss, MSELoss from torch.nn.utils import parameters_to_vector -from laplace.utils.feature_extractor import FeatureExtractor -from laplace.utils.swag import fit_diagonal_swag +from laplace.utils import FeatureExtractor, fit_diagonal_swag -__all__ = ['SubnetMask', 'RandomSubnetMask', 'LargestMagnitudeSubnetMask', 'LargestVarianceDiagLaplaceSubnetMask', 'LargestVarianceSWAGSubnetMask', 'ParamNameSubnetMask', 'ModuleNameSubnetMask', 'LastLayerSubnetMask'] + +__all__ = ['SubnetMask', 'RandomSubnetMask', 'LargestMagnitudeSubnetMask', 'LargestVarianceDiagLaplaceSubnetMask', + 'LargestVarianceSWAGSubnetMask', 'ParamNameSubnetMask', 'ModuleNameSubnetMask', 'LastLayerSubnetMask'] class SubnetMask: diff --git a/tests/test_baselaplace.py b/tests/test_baselaplace.py index a9292e8d..36fe8a16 100644 --- a/tests/test_baselaplace.py +++ b/tests/test_baselaplace.py @@ -12,7 +12,7 @@ from torchvision.models import wide_resnet50_2 from laplace.laplace import FullLaplace, KronLaplace, DiagLaplace, LowRankLaplace -from laplace.utils.matrix import KronDecomposed +from laplace.utils import KronDecomposed from tests.utils import jacobians_naive diff --git a/tests/test_feature_extractor.py b/tests/test_feature_extractor.py index b80bbcb4..d3b95ad5 100644 --- a/tests/test_feature_extractor.py +++ b/tests/test_feature_extractor.py @@ -2,7 +2,7 @@ import torch.nn as nn import 
torchvision.models as models -from laplace.utils.feature_extractor import FeatureExtractor +from laplace.utils import FeatureExtractor class CNN(nn.Module): diff --git a/tests/test_jacobians.py b/tests/test_jacobians.py index 0495adb3..13d2466e 100644 --- a/tests/test_jacobians.py +++ b/tests/test_jacobians.py @@ -3,7 +3,7 @@ from torch import nn from laplace.curvature import AsdlInterface, BackPackInterface -from laplace.utils.feature_extractor import FeatureExtractor +from laplace.utils import FeatureExtractor from tests.utils import jacobians_naive diff --git a/tests/test_lllaplace.py b/tests/test_lllaplace.py index bc6f7a5e..0e6855aa 100644 --- a/tests/test_lllaplace.py +++ b/tests/test_lllaplace.py @@ -9,7 +9,7 @@ from torchvision.models import wide_resnet50_2 from laplace.lllaplace import FullLLLaplace, KronLLLaplace, DiagLLLaplace -from laplace.utils.feature_extractor import FeatureExtractor +from laplace.utils import FeatureExtractor from tests.utils import jacobians_naive diff --git a/tests/test_matrix.py b/tests/test_matrix.py index 66a5da48..7c366990 100644 --- a/tests/test_matrix.py +++ b/tests/test_matrix.py @@ -4,10 +4,9 @@ from torch import nn from torch.nn.utils import parameters_to_vector -from laplace.utils.matrix import Kron -from laplace.utils.utils import kron as kron_prod +from laplace.utils import Kron, block_diag +from laplace.utils import kron as kron_prod from laplace.curvature import BackPackGGN -from laplace.utils.utils import block_diag from tests.utils import get_psd_matrix, jacobians_naive diff --git a/tests/test_subnetlaplace.py b/tests/test_subnetlaplace.py index 1e98622c..8e0d3d18 100644 --- a/tests/test_subnetlaplace.py +++ b/tests/test_subnetlaplace.py @@ -1,5 +1,5 @@ import pytest -from itertools import product +from itertools import product import torch from torch import nn @@ -8,7 +8,7 @@ from laplace import Laplace, SubnetLaplace from laplace.baselaplace import DiagLaplace -from laplace.utils.subnetmask import SubnetMask, RandomSubnetMask, LargestMagnitudeSubnetMask, LargestVarianceDiagLaplaceSubnetMask, LargestVarianceSWAGSubnetMask, ParamNameSubnetMask, ModuleNameSubnetMask, LastLayerSubnetMask +from laplace.utils import SubnetMask, RandomSubnetMask, LargestMagnitudeSubnetMask, LargestVarianceDiagLaplaceSubnetMask, LargestVarianceSWAGSubnetMask, ParamNameSubnetMask, ModuleNameSubnetMask, LastLayerSubnetMask torch.manual_seed(240) diff --git a/tests/test_utils.py b/tests/test_utils.py index b673be3d..1ad0f517 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -1,5 +1,5 @@ import torch -from laplace.utils.utils import invsqrt_precision, diagonal_add_scalar, symeig +from laplace.utils import invsqrt_precision, diagonal_add_scalar, symeig def test_sqrt_precision(): From 99d3fb2dd54444fb7e3c11cc903d5c1a00d80bd2 Mon Sep 17 00:00:00 2001 From: "Erik A. 
Daxberger" Date: Wed, 12 Jan 2022 09:32:33 +0000 Subject: [PATCH 42/49] Simplify check for duplicate indices in Subnet Laplace --- laplace/subnetlaplace.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/laplace/subnetlaplace.py b/laplace/subnetlaplace.py index 6b04cf10..4f815bc4 100644 --- a/laplace/subnetlaplace.py +++ b/laplace/subnetlaplace.py @@ -96,7 +96,7 @@ def _check_subnetwork_indices(self, subnetwork_indices): elif not (len(subnetwork_indices[subnetwork_indices < 0]) == 0 and\ len(subnetwork_indices[subnetwork_indices >= self.n_params]) == 0): raise ValueError(f'Subnetwork indices must lie between 0 and n_params={self.n_params}.') - elif not (subnetwork_indices.sort()[0].equal(torch.unique(subnetwork_indices, sorted=True))): + elif not (len(subnetwork_indices.unique()) == len(subnetwork_indices)): raise ValueError('Subnetwork indices must not contain duplicate entries.') @property From 61865737eeeeb76040642fd3f9fbcd59494c3a2c Mon Sep 17 00:00:00 2001 From: "Erik A. Daxberger" Date: Wed, 12 Jan 2022 09:35:19 +0000 Subject: [PATCH 43/49] Add __all__ for matrix.py --- laplace/utils/matrix.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/laplace/utils/matrix.py b/laplace/utils/matrix.py index 61c07ab5..14a84bfe 100644 --- a/laplace/utils/matrix.py +++ b/laplace/utils/matrix.py @@ -6,6 +6,9 @@ from laplace.utils import _is_valid_scalar, symeig, kron, block_diag +__all__ = ['Kron', 'KronDecomposed'] + + class Kron: """Kronecker factored approximate curvature representation for a corresponding neural network. From 86b91bc4e806d5fb5e659eefbf4c70fa6ab473f4 Mon Sep 17 00:00:00 2001 From: "Erik A. Daxberger" Date: Wed, 12 Jan 2022 09:37:13 +0000 Subject: [PATCH 44/49] Add line breaks with proper indents for __all__ in subnetmask.py --- laplace/utils/subnetmask.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/laplace/utils/subnetmask.py b/laplace/utils/subnetmask.py index 9609401a..607b8f60 100644 --- a/laplace/utils/subnetmask.py +++ b/laplace/utils/subnetmask.py @@ -7,8 +7,9 @@ from laplace.utils import FeatureExtractor, fit_diagonal_swag -__all__ = ['SubnetMask', 'RandomSubnetMask', 'LargestMagnitudeSubnetMask', 'LargestVarianceDiagLaplaceSubnetMask', - 'LargestVarianceSWAGSubnetMask', 'ParamNameSubnetMask', 'ModuleNameSubnetMask', 'LastLayerSubnetMask'] +__all__ = ['SubnetMask', 'RandomSubnetMask', 'LargestMagnitudeSubnetMask', + 'LargestVarianceDiagLaplaceSubnetMask', 'LargestVarianceSWAGSubnetMask', + 'ParamNameSubnetMask', 'ModuleNameSubnetMask', 'LastLayerSubnetMask'] class SubnetMask: From 3b54e8a858b0086cbc546292f32339798258339a Mon Sep 17 00:00:00 2001 From: "Erik A. 
Daxberger" Date: Wed, 12 Jan 2022 09:45:52 +0000 Subject: [PATCH 45/49] Change name of fit_diagonal_swag() to fit_diagonal_swag_var() --- laplace/utils/__init__.py | 4 ++-- laplace/utils/subnetmask.py | 4 ++-- laplace/utils/swag.py | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/laplace/utils/__init__.py b/laplace/utils/__init__.py index cc05d4d9..10f559e0 100644 --- a/laplace/utils/__init__.py +++ b/laplace/utils/__init__.py @@ -1,7 +1,7 @@ from laplace.utils.utils import get_nll, validate, parameters_per_layer, invsqrt_precision, _is_batchnorm, _is_valid_scalar, kron, diagonal_add_scalar, symeig, block_diag, expand_prior_precision from laplace.utils.feature_extractor import FeatureExtractor from laplace.utils.matrix import Kron, KronDecomposed -from laplace.utils.swag import fit_diagonal_swag +from laplace.utils.swag import fit_diagonal_swag_var from laplace.utils.subnetmask import SubnetMask, RandomSubnetMask, LargestMagnitudeSubnetMask, LargestVarianceDiagLaplaceSubnetMask, LargestVarianceSWAGSubnetMask, ParamNameSubnetMask, ModuleNameSubnetMask, LastLayerSubnetMask @@ -9,6 +9,6 @@ 'diagonal_add_scalar', 'symeig', 'block_diag', 'expand_prior_precision', 'FeatureExtractor', 'Kron', 'KronDecomposed', - 'fit_diagonal_swag', + 'fit_diagonal_swag_var', 'SubnetMask', 'RandomSubnetMask', 'LargestMagnitudeSubnetMask', 'LargestVarianceDiagLaplaceSubnetMask', 'LargestVarianceSWAGSubnetMask', 'ParamNameSubnetMask', 'ModuleNameSubnetMask', 'LastLayerSubnetMask'] diff --git a/laplace/utils/subnetmask.py b/laplace/utils/subnetmask.py index 607b8f60..d76751d3 100644 --- a/laplace/utils/subnetmask.py +++ b/laplace/utils/subnetmask.py @@ -4,7 +4,7 @@ from torch.nn import CrossEntropyLoss, MSELoss from torch.nn.utils import parameters_to_vector -from laplace.utils import FeatureExtractor, fit_diagonal_swag +from laplace.utils import FeatureExtractor, fit_diagonal_swag_var __all__ = ['SubnetMask', 'RandomSubnetMask', 'LargestMagnitudeSubnetMask', @@ -225,7 +225,7 @@ def compute_param_scores(self, train_loader): criterion = CrossEntropyLoss(reduction='mean') elif self.likelihood == 'regression': criterion = MSELoss(reduction='mean') - param_variances = fit_diagonal_swag(self.model, train_loader, criterion, n_snapshots_total=self.swag_n_snapshots, snapshot_freq=self.swag_snapshot_freq, lr=self.swag_lr) + param_variances = fit_diagonal_swag_var(self.model, train_loader, criterion, n_snapshots_total=self.swag_n_snapshots, snapshot_freq=self.swag_snapshot_freq, lr=self.swag_lr) return param_variances diff --git a/laplace/utils/swag.py b/laplace/utils/swag.py index 8e6fb514..7b2e529f 100644 --- a/laplace/utils/swag.py +++ b/laplace/utils/swag.py @@ -4,14 +4,14 @@ from torch.nn.utils import parameters_to_vector -__all__ = ['fit_diagonal_swag'] +__all__ = ['fit_diagonal_swag_var'] def _param_vector(model): return parameters_to_vector(model.parameters()).detach() -def fit_diagonal_swag(model, train_loader, criterion, n_snapshots_total=40, snapshot_freq=1, lr=0.01, momentum=0.9, weight_decay=3e-4, min_var=1e-30): +def fit_diagonal_swag_var(model, train_loader, criterion, n_snapshots_total=40, snapshot_freq=1, lr=0.01, momentum=0.9, weight_decay=3e-4, min_var=1e-30): """ Fit diagonal SWAG [1], which estimates marginal variances of model parameters by computing the first and second moment of SGD iterates with a large learning rate. From d5d2d2348c46ab3f770712293b86aa63b10c2e35 Mon Sep 17 00:00:00 2001 From: "Erik A. 
Daxberger" Date: Wed, 12 Jan 2022 11:43:43 +0000 Subject: [PATCH 46/49] Shorten lines to 100 chars and change line breaks from backslash to bracket --- laplace/subnetlaplace.py | 12 ++-- laplace/utils/subnetmask.py | 36 ++++++++---- laplace/utils/swag.py | 11 ++-- tests/test_subnetlaplace.py | 114 ++++++++++++++++++++++++------------ 4 files changed, 112 insertions(+), 61 deletions(-) diff --git a/laplace/subnetlaplace.py b/laplace/subnetlaplace.py index 4f815bc4..32767ba2 100644 --- a/laplace/subnetlaplace.py +++ b/laplace/subnetlaplace.py @@ -73,9 +73,9 @@ class SubnetLaplace(FullLaplace): def __init__(self, model, likelihood, subnetwork_indices, sigma_noise=1., prior_precision=1., prior_mean=0., temperature=1., backend=BackPackGGN, backend_kwargs=None): self.H = None - super().__init__(model, likelihood, sigma_noise=sigma_noise, prior_precision=prior_precision, - prior_mean=prior_mean, temperature=temperature, backend=backend, - backend_kwargs=backend_kwargs) + super().__init__(model, likelihood, sigma_noise=sigma_noise, + prior_precision=prior_precision, prior_mean=prior_mean, + temperature=temperature, backend=backend, backend_kwargs=backend_kwargs) # check validity of subnetwork indices and pass them to backend self._check_subnetwork_indices(subnetwork_indices) self.backend.subnetwork_indices = subnetwork_indices @@ -90,10 +90,10 @@ def _check_subnetwork_indices(self, subnetwork_indices): """ if subnetwork_indices is None: raise ValueError('Subnetwork indices cannot be None.') - elif not (isinstance(subnetwork_indices, torch.LongTensor) and\ + elif not (isinstance(subnetwork_indices, torch.LongTensor) and subnetwork_indices.numel() > 0 and len(subnetwork_indices.shape) == 1): - raise ValueError('Subnetwork indices must be non-empty, 1-dimensional torch.LongTensor.') - elif not (len(subnetwork_indices[subnetwork_indices < 0]) == 0 and\ + raise ValueError('Subnetwork indices must be non-empty 1-dimensional torch.LongTensor.') + elif not (len(subnetwork_indices[subnetwork_indices < 0]) == 0 and len(subnetwork_indices[subnetwork_indices >= self.n_params]) == 0): raise ValueError(f'Subnetwork indices must lie between 0 and n_params={self.n_params}.') elif not (len(subnetwork_indices.unique()) == len(subnetwork_indices)): diff --git a/laplace/utils/subnetmask.py b/laplace/utils/subnetmask.py index d76751d3..00d73ff4 100644 --- a/laplace/utils/subnetmask.py +++ b/laplace/utils/subnetmask.py @@ -62,13 +62,15 @@ def convert_subnet_mask_to_indices(self, subnet_mask): """ if not isinstance(subnet_mask, torch.Tensor): raise ValueError('Subnetwork mask needs to be torch.Tensor!') - elif subnet_mask.dtype not in [torch.int64, torch.int32, torch.int16, torch.int8, torch.uint8, torch.bool] or\ - len(subnet_mask.shape) != 1: - raise ValueError('Subnetwork mask needs to be 1-dimensional integral or boolean tensor!') - elif len(subnet_mask) != self._n_params or\ - len(subnet_mask[subnet_mask == 0]) + len(subnet_mask[subnet_mask == 1]) != self._n_params: - raise ValueError('Subnetwork mask needs to be a binary vector of size (n_params) where 1s'\ - 'locate the subnetwork parameters within the vectorized model parameters'\ + elif subnet_mask.dtype not in [torch.int64, torch.int32, torch.int16, torch.int8, + torch.uint8, torch.bool] or len(subnet_mask.shape) != 1: + raise ValueError( + 'Subnetwork mask needs to be 1-dimensional integral or boolean tensor!') + elif (len(subnet_mask) != self._n_params or len(subnet_mask[subnet_mask == 0]) + + len(subnet_mask[subnet_mask == 1]) != self._n_params): + raise 
ValueError('Subnetwork mask needs to be a binary vector of' + 'size (n_params) where 1s locate the subnetwork' + 'parameters within the vectorized model parameters' '(i.e. `torch.nn.utils.parameters_to_vector(model.parameters())`)!') subnet_mask_indices = subnet_mask.nonzero(as_tuple=True)[0] @@ -117,7 +119,8 @@ def get_subnet_mask(self, train_loader): class ScoreBasedSubnetMask(SubnetMask): - """Baseclass for subnetwork masks defined by selecting the top-scoring parameters according to some criterion. + """Baseclass for subnetwork masks defined by selecting + the top-scoring parameters according to some criterion. Parameters ---------- @@ -129,9 +132,11 @@ def __init__(self, model, n_params_subnet): super().__init__(model) if n_params_subnet is None: - raise ValueError(f'Need to pass number of subnetwork parameters when using subnetwork Laplace.') + raise ValueError( + 'Need to pass number of subnetwork parameters when using subnetwork Laplace.') if n_params_subnet > self._n_params: - raise ValueError(f'Subnetwork ({n_params_subnet}) cannot be larger than model ({self._n_params}).') + raise ValueError( + f'Subnetwork ({n_params_subnet}) cannot be larger than model ({self._n_params}).') self._n_params_subnet = n_params_subnet self._param_scores = None @@ -210,7 +215,8 @@ class LargestVarianceSWAGSubnetMask(ScoreBasedSubnetMask): swag_lr : float learning rate for SWAG snapshot collection """ - def __init__(self, model, n_params_subnet, likelihood='classification', swag_n_snapshots=40, swag_snapshot_freq=1, swag_lr=0.01): + def __init__(self, model, n_params_subnet, likelihood='classification', + swag_n_snapshots=40, swag_snapshot_freq=1, swag_lr=0.01): super().__init__(model, n_params_subnet) self.likelihood = likelihood self.swag_n_snapshots = swag_n_snapshots @@ -225,7 +231,10 @@ def compute_param_scores(self, train_loader): criterion = CrossEntropyLoss(reduction='mean') elif self.likelihood == 'regression': criterion = MSELoss(reduction='mean') - param_variances = fit_diagonal_swag_var(self.model, train_loader, criterion, n_snapshots_total=self.swag_n_snapshots, snapshot_freq=self.swag_snapshot_freq, lr=self.swag_lr) + param_variances = fit_diagonal_swag_var(self.model, train_loader, criterion, + n_snapshots_total=self.swag_n_snapshots, + snapshot_freq=self.swag_snapshot_freq, + lr=self.swag_lr) return param_variances @@ -236,7 +245,8 @@ class ParamNameSubnetMask(SubnetMask): ---------- model : torch.nn.Module parameter_names: List[str] - list of names of the parameters (as in `model.named_parameters()`) that define the subnetwork + list of names of the parameters (as in `model.named_parameters()`) + that define the subnetwork """ def __init__(self, model, parameter_names): super().__init__(model) diff --git a/laplace/utils/swag.py b/laplace/utils/swag.py index 7b2e529f..a6aba701 100644 --- a/laplace/utils/swag.py +++ b/laplace/utils/swag.py @@ -11,7 +11,8 @@ def _param_vector(model): return parameters_to_vector(model.parameters()).detach() -def fit_diagonal_swag_var(model, train_loader, criterion, n_snapshots_total=40, snapshot_freq=1, lr=0.01, momentum=0.9, weight_decay=3e-4, min_var=1e-30): +def fit_diagonal_swag_var(model, train_loader, criterion, n_snapshots_total=40, snapshot_freq=1, + lr=0.01, momentum=0.9, weight_decay=3e-4, min_var=1e-30): """ Fit diagonal SWAG [1], which estimates marginal variances of model parameters by computing the first and second moment of SGD iterates with a large learning rate. 
@@ -63,7 +64,8 @@ def fit_diagonal_swag_var(model, train_loader, criterion, n_snapshots_total=40, n_snapshots = 0 # run SGD to collect model snapshots - optimizer = torch.optim.SGD(_model.parameters(), lr=lr, momentum=momentum, weight_decay=weight_decay) + optimizer = torch.optim.SGD( + _model.parameters(), lr=lr, momentum=momentum, weight_decay=weight_decay) n_epochs = snapshot_freq * n_snapshots_total for epoch in range(n_epochs): for inputs, targets in train_loader: @@ -75,8 +77,9 @@ def fit_diagonal_swag_var(model, train_loader, criterion, n_snapshots_total=40, if epoch % snapshot_freq == 0: # update running estimates of first and second moment of model parameters - mean = mean * n_snapshots / (n_snapshots + 1) + _param_vector(_model) / (n_snapshots + 1) - sq_mean = sq_mean * n_snapshots / (n_snapshots + 1) + _param_vector(_model) ** 2 / (n_snapshots + 1) + old_fac, new_fac = n_snapshots / (n_snapshots + 1), 1 / (n_snapshots + 1) + mean = mean * old_fac + _param_vector(_model) * new_fac + sq_mean = sq_mean * old_fac + _param_vector(_model) ** 2 * new_fac n_snapshots += 1 # compute marginal parameter variances, Var[P] = E[P^2] - E[P]^2 diff --git a/tests/test_subnetlaplace.py b/tests/test_subnetlaplace.py index 8e0d3d18..10b3c319 100644 --- a/tests/test_subnetlaplace.py +++ b/tests/test_subnetlaplace.py @@ -8,12 +8,15 @@ from laplace import Laplace, SubnetLaplace from laplace.baselaplace import DiagLaplace -from laplace.utils import SubnetMask, RandomSubnetMask, LargestMagnitudeSubnetMask, LargestVarianceDiagLaplaceSubnetMask, LargestVarianceSWAGSubnetMask, ParamNameSubnetMask, ModuleNameSubnetMask, LastLayerSubnetMask +from laplace.utils import (SubnetMask, RandomSubnetMask, LargestMagnitudeSubnetMask, + LargestVarianceDiagLaplaceSubnetMask, LargestVarianceSWAGSubnetMask, + ParamNameSubnetMask, ModuleNameSubnetMask, LastLayerSubnetMask) torch.manual_seed(240) torch.set_default_tensor_type(torch.DoubleTensor) -score_based_subnet_masks = [RandomSubnetMask, LargestMagnitudeSubnetMask, LargestVarianceDiagLaplaceSubnetMask, LargestVarianceSWAGSubnetMask] +score_based_subnet_masks = [RandomSubnetMask, LargestMagnitudeSubnetMask, + LargestVarianceDiagLaplaceSubnetMask, LargestVarianceSWAGSubnetMask] layer_subnet_masks = [ParamNameSubnetMask, ModuleNameSubnetMask, LastLayerSubnetMask] all_subnet_masks = score_based_subnet_masks + layer_subnet_masks likelihoods = ['classification', 'regression'] @@ -51,23 +54,28 @@ def test_subnet_laplace_init(model, likelihood): # subnet Laplace with full Hessian should work hessian_structure = 'full' - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetmask.indices, hessian_structure=hessian_structure) + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', + subnetwork_indices=subnetmask.indices, hessian_structure=hessian_structure) assert isinstance(lap, SubnetLaplace) # subnet Laplace without specifying subnetwork indices should raise an error with pytest.raises(TypeError): - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', hessian_structure=hessian_structure) + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', + hessian_structure=hessian_structure) # subnet Laplace with diag, kron or lowrank Hessians should raise errors hessian_structure = 'diag' with pytest.raises(ValueError): - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetmask.indices, hessian_structure=hessian_structure) + 
lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', + subnetwork_indices=subnetmask.indices, hessian_structure=hessian_structure) hessian_structure = 'kron' with pytest.raises(ValueError): - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetmask.indices, hessian_structure=hessian_structure) + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', + subnetwork_indices=subnetmask.indices, hessian_structure=hessian_structure) hessian_structure = 'lowrank' with pytest.raises(ValueError): - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetmask.indices, hessian_structure=hessian_structure) + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', + subnetwork_indices=subnetmask.indices, hessian_structure=hessian_structure) @pytest.mark.parametrize('likelihood', likelihoods) @@ -77,81 +85,97 @@ def test_custom_subnetwork_indices(model, likelihood, class_loader, reg_loader): # subnetwork indices that are None should raise an error subnetwork_indices = None with pytest.raises(ValueError): - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetwork_indices, hessian_structure='full') + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', + subnetwork_indices=subnetwork_indices, hessian_structure='full') # subnetwork indices that are not PyTorch tensors should raise an error subnetwork_indices = [0, 5, 11, 42] with pytest.raises(ValueError): - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetwork_indices, hessian_structure='full') + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', + subnetwork_indices=subnetwork_indices, hessian_structure='full') # subnetwork indices that are empty tensors should raise an error subnetwork_indices = torch.LongTensor([]) with pytest.raises(ValueError): - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetwork_indices, hessian_structure='full') + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', + subnetwork_indices=subnetwork_indices, hessian_structure='full') # subnetwork indices that are scalar tensors should raise an error subnetwork_indices = torch.LongTensor(11) with pytest.raises(ValueError): - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetwork_indices, hessian_structure='full') + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', + subnetwork_indices=subnetwork_indices, hessian_structure='full') # subnetwork indices that are not 1D PyTorch tensors should raise an error subnetwork_indices = torch.LongTensor([[0, 5], [11, 42]]) with pytest.raises(ValueError): - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetwork_indices, hessian_structure='full') + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', + subnetwork_indices=subnetwork_indices, hessian_structure='full') # subnetwork indices that are double tensors should raise an error subnetwork_indices = torch.DoubleTensor([0.0, 5.0, 11.0, 42.0]) with pytest.raises(ValueError): - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetwork_indices, hessian_structure='full') + lap = Laplace(model, likelihood=likelihood, 
subset_of_weights='subnetwork', + subnetwork_indices=subnetwork_indices, hessian_structure='full') # subnetwork indices that are float tensors should raise an error subnetwork_indices = torch.FloatTensor([0.0, 5.0, 11.0, 42.0]) with pytest.raises(ValueError): - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetwork_indices, hessian_structure='full') + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', + subnetwork_indices=subnetwork_indices, hessian_structure='full') # subnetwork indices that are half tensors should raise an error subnetwork_indices = torch.HalfTensor([0.0, 5.0, 11.0, 42.0]) with pytest.raises(ValueError): - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetwork_indices, hessian_structure='full') + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', + subnetwork_indices=subnetwork_indices, hessian_structure='full') # subnetwork indices that are int tensors should raise an error subnetwork_indices = torch.IntTensor([0, 5, 11, 42]) with pytest.raises(ValueError): - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetwork_indices, hessian_structure='full') + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', + subnetwork_indices=subnetwork_indices, hessian_structure='full') # subnetwork indices that are short tensors should raise an error subnetwork_indices = torch.ShortTensor([0, 5, 11, 42]) with pytest.raises(ValueError): - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetwork_indices, hessian_structure='full') + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', + subnetwork_indices=subnetwork_indices, hessian_structure='full') # subnetwork indices that are char tensors should raise an error subnetwork_indices = torch.CharTensor([0, 5, 11, 42]) with pytest.raises(ValueError): - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetwork_indices, hessian_structure='full') + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', + subnetwork_indices=subnetwork_indices, hessian_structure='full') # subnetwork indices that are bool tensors should raise an error subnetwork_indices = torch.BoolTensor([0, 5, 11, 42]) with pytest.raises(ValueError): - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetwork_indices, hessian_structure='full') + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', + subnetwork_indices=subnetwork_indices, hessian_structure='full') # subnetwork indices that contain elements smaller than zero should raise an error subnetwork_indices = torch.LongTensor([0, -1, -11]) with pytest.raises(ValueError): - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetwork_indices, hessian_structure='full') + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', + subnetwork_indices=subnetwork_indices, hessian_structure='full') # subnetwork indices that contain elements larger than n_params should raise an error subnetwork_indices = torch.LongTensor([model.n_params + 1, model.n_params + 42]) with pytest.raises(ValueError): - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetwork_indices, hessian_structure='full') + lap 
= Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', + subnetwork_indices=subnetwork_indices, hessian_structure='full') # subnetwork indices that contain duplicate entries should raise an error subnetwork_indices = torch.LongTensor([0, 0, 5, 11, 11, 42]) with pytest.raises(ValueError): - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetwork_indices, hessian_structure='full') + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', + subnetwork_indices=subnetwork_indices, hessian_structure='full') # Non-empty, 1-dimensional torch.LongTensor with valid entries should work subnetwork_indices = torch.LongTensor([0, 5, 11, 42]) - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetwork_indices, hessian_structure='full') + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', + subnetwork_indices=subnetwork_indices, hessian_structure='full') lap.fit(loader) assert isinstance(lap, SubnetLaplace) assert lap.n_params_subnet == 4 @@ -184,7 +208,7 @@ def test_score_based_subnet_masks(model, likelihood, subnetwork_mask, class_load subnetmask = subnetwork_mask(**subnetmask_kwargs) subnetmask.select(loader) - # should raise error if we set number of subnet parameters to be larger than number of model parameters + # should raise error if number of subnet parameters is larger than number of model parameters subnetmask_kwargs.update(n_params_subnet=99999) with pytest.raises(ValueError): subnetmask = subnetwork_mask(**subnetmask_kwargs) @@ -207,7 +231,8 @@ def test_score_based_subnet_masks(model, likelihood, subnetwork_mask, class_load subnetmask.select(loader) # define valid subnet Laplace model - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetmask.indices, hessian_structure='full') + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', + subnetwork_indices=subnetmask.indices, hessian_structure='full') assert isinstance(lap, SubnetLaplace) # fit Laplace model @@ -230,7 +255,8 @@ def test_layer_subnet_masks(model, likelihood, subnetwork_mask, class_loader, re subnetmask_kwargs = dict(model=model) # fit last-layer Laplace model - lllap = Laplace(model, likelihood=likelihood, subset_of_weights='last_layer', hessian_structure='full') + lllap = Laplace(model, likelihood=likelihood, subset_of_weights='last_layer', + hessian_structure='full') lllap.fit(loader) # should raise error if we pass number of subnet parameters @@ -259,11 +285,13 @@ def test_layer_subnet_masks(model, likelihood, subnetwork_mask, class_loader, re subnetmask = subnetwork_mask(**subnetmask_kwargs) subnetmask.select(loader) - # define last-layer Laplace model by parameter names and check that Hessian is identical to that of a full LLLaplace model + # define last-layer Laplace model by parameter names and check that + # Hessian is identical to that of a full LLLaplace model subnetmask_kwargs.update(parameter_names=['1.weight', '1.bias']) subnetmask = subnetwork_mask(**subnetmask_kwargs) subnetmask.select(loader) - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetmask.indices, hessian_structure='full') + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', + subnetwork_indices=subnetmask.indices, hessian_structure='full') lap.fit(loader) assert lllap.H.equal(lap.H) @@ -278,7 +306,8 @@ def test_layer_subnet_masks(model, likelihood, 
subnetwork_mask, class_loader, re # select subnet mask and fit Laplace model subnetmask.select(loader) - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetmask.indices, hessian_structure='full') + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', + subnetwork_indices=subnetmask.indices, hessian_structure='full') lap.fit(loader) assert isinstance(lap, SubnetLaplace) @@ -301,11 +330,13 @@ def test_layer_subnet_masks(model, likelihood, subnetwork_mask, class_loader, re subnetmask = subnetwork_mask(**subnetmask_kwargs) subnetmask.select(loader) - # define last-layer Laplace model by module name and check that Hessian is identical to that of a full LLLaplace model + # define last-layer Laplace model by module name and check that + # Hessian is identical to that of a full LLLaplace model subnetmask_kwargs.update(module_names=['1']) subnetmask = subnetwork_mask(**subnetmask_kwargs) subnetmask.select(loader) - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetmask.indices, hessian_structure='full') + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', + subnetwork_indices=subnetmask.indices, hessian_structure='full') lap.fit(loader) assert lllap.H.equal(lap.H) @@ -320,7 +351,8 @@ def test_layer_subnet_masks(model, likelihood, subnetwork_mask, class_loader, re # select subnet mask and fit Laplace model subnetmask.select(loader) - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetmask.indices, hessian_structure='full') + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', + subnetwork_indices=subnetmask.indices, hessian_structure='full') lap.fit(loader) assert isinstance(lap, SubnetLaplace) @@ -341,7 +373,8 @@ def test_layer_subnet_masks(model, likelihood, subnetwork_mask, class_loader, re # select subnet mask and fit Laplace model subnetmask.select(loader) - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetmask.indices, hessian_structure='full') + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', + subnetwork_indices=subnetmask.indices, hessian_structure='full') lap.fit(loader) assert isinstance(lap, SubnetLaplace) @@ -359,7 +392,8 @@ def test_layer_subnet_masks(model, likelihood, subnetwork_mask, class_loader, re # select subnet mask and fit Laplace model subnetmask.select(loader) - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetmask.indices, hessian_structure='full') + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', + subnetwork_indices=subnetmask.indices, hessian_structure='full') lap.fit(loader) assert isinstance(lap, SubnetLaplace) @@ -389,7 +423,8 @@ def get_subnet_mask(self, train_loader): subnetwork_mask = FullSubnetMask subnetmask = subnetwork_mask(model=model) subnetmask.select(loader) - lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', subnetwork_indices=subnetmask.indices, hessian_structure='full') + lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork', + subnetwork_indices=subnetmask.indices, hessian_structure='full') lap.fit(loader) assert isinstance(lap, SubnetLaplace) @@ -399,7 +434,8 @@ def get_subnet_mask(self, train_loader): assert lap.n_params_subnet == model.n_params # check that the Hessian is identical to that of a all-weights FullLaplace model - 
full_lap = Laplace(model, likelihood=likelihood, subset_of_weights='all', hessian_structure='full') + full_lap = Laplace(model, likelihood=likelihood, subset_of_weights='all', + hessian_structure='full') full_lap.fit(loader) assert full_lap.H.equal(lap.H) @@ -421,7 +457,8 @@ def test_regression_predictive(model, reg_loader, subnetwork_mask): subnetmask = subnetwork_mask(**subnetmask_kwargs) subnetmask.select(reg_loader) - lap = Laplace(model, likelihood='regression', subset_of_weights='subnetwork', subnetwork_indices=subnetmask.indices, hessian_structure='full') + lap = Laplace(model, likelihood='regression', subset_of_weights='subnetwork', + subnetwork_indices=subnetmask.indices, hessian_structure='full') assert isinstance(lap, SubnetLaplace) lap.fit(reg_loader) @@ -462,7 +499,8 @@ def test_classification_predictive(model, class_loader, subnetwork_mask): subnetmask = subnetwork_mask(**subnetmask_kwargs) subnetmask.select(class_loader) - lap = Laplace(model, likelihood='classification', subset_of_weights='subnetwork', subnetwork_indices=subnetmask.indices, hessian_structure='full') + lap = Laplace(model, likelihood='classification', subset_of_weights='subnetwork', + subnetwork_indices=subnetmask.indices, hessian_structure='full') assert isinstance(lap, SubnetLaplace) lap.fit(class_loader) From ddc15c55e37b36ba2826661155c4f849da38454f Mon Sep 17 00:00:00 2001 From: "Erik A. Daxberger" Date: Wed, 12 Jan 2022 11:56:42 +0000 Subject: [PATCH 47/49] Add call to _init_H() in the Subnet Laplace constructor --- laplace/subnetlaplace.py | 1 + 1 file changed, 1 insertion(+) diff --git a/laplace/subnetlaplace.py b/laplace/subnetlaplace.py index 32767ba2..86178ba6 100644 --- a/laplace/subnetlaplace.py +++ b/laplace/subnetlaplace.py @@ -80,6 +80,7 @@ def __init__(self, model, likelihood, subnetwork_indices, sigma_noise=1., prior_ self._check_subnetwork_indices(subnetwork_indices) self.backend.subnetwork_indices = subnetwork_indices self.n_params_subnet = len(subnetwork_indices) + self._init_H() def _init_H(self): self.H = torch.zeros(self.n_params_subnet, self.n_params_subnet, device=self._device) From 0952ead86a528a73ddfc07493923fb1847a65833 Mon Sep 17 00:00:00 2001 From: "Erik A. 
Daxberger" Date: Wed, 12 Jan 2022 12:07:50 +0000 Subject: [PATCH 48/49] Add test for instantiating Subnet Laplace with large model --- tests/test_subnetlaplace.py | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/tests/test_subnetlaplace.py b/tests/test_subnetlaplace.py index 10b3c319..f51f5a5e 100644 --- a/tests/test_subnetlaplace.py +++ b/tests/test_subnetlaplace.py @@ -5,6 +5,7 @@ from torch import nn from torch.nn.utils import parameters_to_vector from torch.utils.data import DataLoader, TensorDataset +from torchvision.models import wide_resnet50_2 from laplace import Laplace, SubnetLaplace from laplace.baselaplace import DiagLaplace @@ -30,6 +31,12 @@ def model(): return model +@pytest.fixture +def large_model(): + model = wide_resnet50_2() + return model + + @pytest.fixture def class_loader(): X = torch.randn(10, 3) @@ -78,6 +85,24 @@ def test_subnet_laplace_init(model, likelihood): subnetwork_indices=subnetmask.indices, hessian_structure=hessian_structure) +@pytest.mark.parametrize('likelihood', likelihoods) +def test_subnet_laplace_large_init(large_model, likelihood): + # use random subnet mask for this test + subnetwork_mask = RandomSubnetMask + n_param_subnet = 10 + subnetmask_kwargs = dict(model=large_model, n_params_subnet=n_param_subnet) + subnetmask = subnetwork_mask(**subnetmask_kwargs) + subnetmask.select() + + lap = Laplace(large_model, likelihood=likelihood, subset_of_weights='subnetwork', + subnetwork_indices=subnetmask.indices, hessian_structure='full') + assert lap.n_params_subnet == n_param_subnet + assert lap.H.shape == (lap.n_params_subnet, lap.n_params_subnet) + H = lap.H.clone() + lap._init_H() + assert torch.allclose(H, lap.H) + + @pytest.mark.parametrize('likelihood', likelihoods) def test_custom_subnetwork_indices(model, likelihood, class_loader, reg_loader): loader = class_loader if likelihood == 'classification' else reg_loader From 83877b29bcc1a040718a1d127cfb5773348cca84 Mon Sep 17 00:00:00 2001 From: "Erik A. Daxberger" Date: Wed, 12 Jan 2022 12:12:41 +0000 Subject: [PATCH 49/49] Update docs --- docs/baselaplace.html | 505 ++++++----- docs/curvature/asdl.html | 54 +- docs/curvature/backpack.html | 15 +- docs/curvature/curvature.html | 36 +- docs/curvature/index.html | 105 ++- docs/index.html | 441 ++++++++-- docs/laplace.html | 4 +- docs/lllaplace.html | 208 +++-- docs/regression_example.png | Bin 27924 -> 28052 bytes docs/regression_example_online.png | Bin 28674 -> 28716 bytes docs/subnetlaplace.html | 171 ++++ docs/{ => utils}/feature_extractor.html | 30 +- docs/utils/index.html | 1017 +++++++++++++++++++++++ docs/{ => utils}/matrix.html | 80 +- docs/utils/subnetmask.html | 466 +++++++++++ docs/utils/swag.html | 102 +++ docs/{ => utils}/utils.html | 44 +- 17 files changed, 2808 insertions(+), 470 deletions(-) create mode 100644 docs/subnetlaplace.html rename docs/{ => utils}/feature_extractor.html (84%) create mode 100644 docs/utils/index.html rename docs/{ => utils}/matrix.html (75%) create mode 100644 docs/utils/subnetmask.html create mode 100644 docs/utils/swag.html rename docs/{ => utils}/utils.html (84%) diff --git a/docs/baselaplace.html b/docs/baselaplace.html index fbaa0a07..ea13c62b 100644 --- a/docs/baselaplace.html +++ b/docs/baselaplace.html @@ -172,6 +172,253 @@

Parameters

+
+class ParametricLaplace +(model, likelihood, sigma_noise=1.0, prior_precision=1.0, prior_mean=0.0, temperature=1.0, backend=laplace.curvature.backpack.BackPackGGN, backend_kwargs=None) +
+
+

Parametric Laplace class.

+

Subclasses need to specify how the Hessian approximation is initialized, +how to add up curvature over training data, how to sample from the +Laplace approximation, and how to compute the functional variance.

+

A Laplace approximation is represented by a MAP which is given by the +model parameter and a posterior precision or covariance specifying +a Gaussian distribution \mathcal{N}(\theta_{MAP}, P^{-1}). +The goal of this class is to compute the posterior precision P +which sums as + +P = \sum_{n=1}^N \nabla^2_\theta \log p(\mathcal{D}_n \mid \theta) +\vert_{\theta_{MAP}} + \nabla^2_\theta \log p(\theta) \vert_{\theta_{MAP}}. + +Every subclass implements different approximations to the log likelihood Hessians, +for example, a diagonal one. The prior is assumed to be Gaussian and therefore we have +a simple form for \nabla^2_\theta \log p(\theta) \vert_{\theta_{MAP}} = P_0 . +In particular, we assume a scalar, layer-wise, or diagonal prior precision so that in +all cases P_0 = \textrm{diag}(p_0) and the structure of p_0 can be varied.

+

Ancestors

+ +

Subclasses

+ +

Instance variables

+
+
var scatter
+
+

Computes the scatter, a term of the log marginal likelihood that +corresponds to L-2 regularization: +scatter = (\theta_{MAP} - \mu_0)^{T} P_0 (\theta_{MAP} - \mu_0) .

+

Returns

+

scatter : torch.Tensor

+
+
var log_det_prior_precision
+
+

Compute log determinant of the prior precision +\log \det P_0

+

Returns

+
+
log_det : torch.Tensor
+
 
+
+
+
var log_det_posterior_precision
+
+

Compute log determinant of the posterior precision +\log \det P which depends on the subclasses structure +used for the Hessian approximation.

+

Returns

+
+
log_det : torch.Tensor
+
 
+
+
+
var log_det_ratio
+
+

Compute the log determinant ratio, a part of the log marginal likelihood. + +\log \frac{\det P}{\det P_0} = \log \det P - \log \det P_0 +

+

Returns

+
+
log_det_ratio : torch.Tensor
+
 
+
+
+
var posterior_precision
+
+

Compute or return the posterior precision P.

+

Returns

+
+
posterior_prec : torch.Tensor
+
 
+
+
+
+

Methods

+
+
+def fit(self, train_loader, override=True) +
+
+

Fit the local Laplace approximation at the parameters of the model.

+

Parameters

+
+
train_loader : torch.data.utils.DataLoader
+
each iterate is a training batch (X, y); +train_loader.dataset needs to be set to access N, size of the data set
+
override : bool, default=True
+
whether to initialize H, loss, and n_data again; setting to False is useful for +online learning settings to accumulate a sequential posterior approximation.
+
+
+
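A minimal sketch of the online-learning usage this parameter enables (assuming la is an already-constructed ParametricLaplace instance; the loader_task* names are illustrative):

# First fit initializes H, loss, and n_data from scratch
la.fit(loader_task1)
# Later fits with override=False accumulate curvature and data counts,
# giving a sequential posterior approximation over all tasks seen so far
la.fit(loader_task2, override=False)
la.fit(loader_task3, override=False)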
+def square_norm(self, value) +
+
+

Compute the squared norm under the posterior precision with \Delta = value - self.mean:

\Delta^\top P \Delta

Returns

+
+
+
square_form
+
 
+
+
+
+def log_prob(self, value, normalized=True) +
+
+

Compute the log probability under the (current) Laplace approximation.

+

Parameters

+
+
normalized : bool, default=True
+
whether to return log of a properly normalized Gaussian or just the +terms that depend on value.
+
+

Returns

+
+
log_prob : torch.Tensor
+
 
+
+
+
+def log_marginal_likelihood(self, prior_precision=None, sigma_noise=None) +
+
+

Compute the Laplace approximation to the log marginal likelihood subject +to specific Hessian approximations that subclasses implement. +Requires that the Laplace approximation has been fit before. +The resulting torch.Tensor is differentiable in prior_precision and +sigma_noise if these have gradients enabled. +By passing prior_precision or sigma_noise, the current value is +overwritten. This is useful for iterating on the log marginal likelihood.

+

Parameters

+
+
prior_precision : torch.Tensor, optional
+
prior precision if should be changed from current prior_precision value
+
sigma_noise : [type], optional
+
observation noise standard deviation if should be changed
+
+

Returns

+
+
log_marglik : torch.Tensor
+
 
+
+
+
+def predictive_samples(self, x, pred_type='glm', n_samples=100) +
+
+

Sample from the posterior predictive on input data x. +Can be used, for example, for Thompson sampling.

+

Parameters

+
+
x : torch.Tensor
+
input data (batch_size, input_shape)
+
pred_type : {'glm', 'nn'}, default='glm'
+
type of posterior predictive, linearized GLM predictive or neural +network sampling predictive. The GLM predictive is consistent with +the curvature approximations used here.
+
n_samples : int
+
number of samples
+
+

Returns

+
+
samples : torch.Tensor
+
samples (n_samples, batch_size, output_shape)
+
+
+
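For instance, a Thompson-sampling step over a batch of candidate inputs could be sketched as follows (la and x_candidates are assumed to exist, and a scalar model output is assumed):

# Draw a single posterior predictive sample per candidate (arm)
samples = la.predictive_samples(x_candidates, pred_type='glm', n_samples=1)
# samples: (1, batch_size, output_shape); act greedily w.r.t. the draw
arm = samples[0].squeeze(-1).argmax()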
+def functional_variance(self, Jacs) +
+
+

Compute functional variance for the 'glm' predictive: +f_var[i] = Jacs[i] @ P.inv() @ Jacs[i].T, which is a output x output +predictive covariance matrix. +Mathematically, we have for a single Jacobian +\mathcal{J} = \nabla_\theta f(x;\theta)\vert_{\theta_{MAP}} +the output covariance matrix + \mathcal{J} P^{-1} \mathcal{J}^T .

+

Parameters

+
+
Jacs : torch.Tensor
+
Jacobians of model output wrt parameters +(batch, outputs, parameters)
+
+

Returns

+
+
f_var : torch.Tensor
+
output covariance (batch, outputs, outputs)
+
+
+
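As a concrete reading of the formula above, the batched output covariance can be written with torch.einsum (a sketch with placeholder shapes; P_inv stands in for the dense posterior covariance):

import torch

batch, outputs, params = 8, 2, 10
Js = torch.randn(batch, outputs, params)   # Jacobians of f w.r.t. parameters
P_inv = torch.eye(params)                  # placeholder posterior covariance
f_var = torch.einsum('ncp,pq,nkq->nck', Js, P_inv, Js)  # (batch, outputs, outputs)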
+def sample(self, n_samples=100) +
+
+

Sample from the Laplace posterior approximation, i.e., + \theta \sim \mathcal{N}(\theta_{MAP}, P^{-1}).

+

Parameters

+
+
n_samples : int, default=100
+
number of samples
+
+
+
+def optimize_prior_precision(self, method='marglik', pred_type='glm', n_steps=100, lr=0.1, init_prior_prec=1.0, val_loader=None, loss=<function get_nll>, log_prior_prec_min=-4, log_prior_prec_max=4, grid_size=100, link_approx='probit', n_samples=100, verbose=False, cv_loss_with_var=False) +
+
+
+
+
+

Inherited members

+ +
class FullLaplace (model, likelihood, sigma_noise=1.0, prior_precision=1.0, prior_mean=0.0, temperature=1.0, backend=laplace.curvature.backpack.BackPackGGN, backend_kwargs=None) @@ -190,6 +437,7 @@

Ancestors

Subclasses

Instance variables

@@ -233,11 +481,13 @@

Inherited members

  • log_det_ratio
  • log_likelihood
  • log_marginal_likelihood
  • +
  • log_prob
  • optimize_prior_precision_base
  • predictive_samples
  • prior_precision_diag
  • sample
  • scatter
  • +
  • square_norm
  • @@ -252,7 +502,7 @@

    Inherited members

    Mathematically, we have for each parameter group, e.g., torch.nn.Module, that \P\approx Q \otimes H. See BaseLaplace for the full interface and see -Kron and KronDecomposed for the structure of +Kron and KronDecomposed for the structure of the Kronecker factors. Kron is used to aggregate factors by summing up and KronDecomposed is used to add the prior, a Hessian factor (e.g. temperature), and computing posterior covariances, marginal likelihood, etc. @@ -273,7 +523,7 @@

    Instance variables

    Kronecker factored Posterior precision P.

    Returns

    -
    precision : KronDecomposed
    +
    precision : KronDecomposed
     
    @@ -293,11 +543,13 @@

    Inherited members

  • log_det_ratio
  • log_likelihood
  • log_marginal_likelihood
  • +
  • log_prob
  • optimize_prior_precision_base
  • predictive_samples
  • prior_precision_diag
  • sample
  • scatter
  • +
  • square_norm
  • @@ -361,218 +613,80 @@

    Inherited members

  • log_det_ratio
  • log_likelihood
  • log_marginal_likelihood
  • +
  • log_prob
  • optimize_prior_precision_base
  • predictive_samples
  • prior_precision_diag
  • sample
  • scatter
  • +
  • square_norm
  • -
    -class ParametricLaplace -(model, likelihood, sigma_noise=1.0, prior_precision=1.0, prior_mean=0.0, temperature=1.0, backend=laplace.curvature.backpack.BackPackGGN, backend_kwargs=None) +
    +class LowRankLaplace +(model, likelihood, sigma_noise=1, prior_precision=1, prior_mean=0, temperature=1, backend=laplace.curvature.asdl.AsdlHessian, backend_kwargs=None)
    -

    Parametric Laplace class.

    -

    Subclasses need to specify how the Hessian approximation is initialized, -how to add up curvature over training data, how to sample from the -Laplace approximation, and how to compute the functional variance.

    -

    A Laplace approximation is represented by a MAP which is given by the -model parameter and a posterior precision or covariance specifying -a Gaussian distribution \mathcal{N}(\theta_{MAP}, P^{-1}). -The goal of this class is to compute the posterior precision P -which sums as - -P = \sum_{n=1}^N \nabla^2_\theta \log p(\mathcal{D}_n \mid \theta) -\vert_{\theta_{MAP}} + \nabla^2_\theta \log p(\theta) \vert_{\theta_{MAP}}. - -Every subclass implements different approximations to the log likelihood Hessians, -for example, a diagonal one. The prior is assumed to be Gaussian and therefore we have -a simple form for \nabla^2_\theta \log p(\theta) \vert_{\theta_{MAP}} = P_0 . -In particular, we assume a scalar, layer-wise, or diagonal prior precision so that in -all cases P_0 = \textrm{diag}(p_0) and the structure of p_0 can be varied.

    +

Laplace approximation with low-rank log likelihood Hessian (approximation). The low-rank matrix is represented by an eigendecomposition (vecs, values). Based on the chosen backend, either a true Hessian or, for example, a GGN approximation can be used. The posterior precision is computed as P = V diag(l) V^T + P_0. To sample, compute the functional variance, and compute the log determinant, algebraic tricks are used to reduce the cost of inversion to that of a K \times K matrix when the rank is K.

    +

    See BaseLaplace for the full interface.
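Through the top-level interface, this flavor can be requested as in the following sketch (model and train_loader are assumed to exist):

from laplace import Laplace

la = Laplace(model, 'classification',
             subset_of_weights='all', hessian_structure='lowrank')
la.fit(train_loader)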

    Ancestors

    -

    Subclasses

    -

    Instance variables

    -
    var scatter
    -
    -

    Computes the scatter, a term of the log marginal likelihood that -corresponds to L-2 regularization: -scatter = (\theta_{MAP} - \mu_0)^{T} P_0 (\theta_{MAP} - \mu_0) .

    -

    Returns

    -

    [type] -[description]

    -
    -
    var log_det_prior_precision
    -
    -

    Compute log determinant of the prior precision -\log \det P_0

    -

    Returns

    -
    -
    log_det : torch.Tensor
    -
     
    -
    -
    -
    var log_det_posterior_precision
    +
    var V
    -

    Compute log determinant of the posterior precision -\log \det P which depends on the subclasses structure -used for the Hessian approximation.

    -

    Returns

    -
    -
    log_det : torch.Tensor
    -
     
    -
    -
    -
    var log_det_ratio
    -
    -

    Compute the log determinant ratio, a part of the log marginal likelihood. - -\log \frac{\det P}{\det P_0} = \log \det P - \log \det P_0 -

    -

    Returns

    -
    -
    log_det_ratio : torch.Tensor
    -
     
    -
    -
    -
    var posterior_precision
    -
    -

    Compute or return the posterior precision P.

    -

    Returns

    -
    -
    posterior_prec : torch.Tensor
    -
     
    -
    -
    -
    -

    Methods

    -
    -
    -def fit(self, train_loader) -
    -
    -

    Fit the local Laplace approximation at the parameters of the model.

    -

    Parameters

    -
    -
    train_loader : torch.data.utils.DataLoader
    -
    each iterate is a training batch (X, y); -train_loader.dataset needs to be set to access N, size of the data set
    -
    -
    -
    -def log_marginal_likelihood(self, prior_precision=None, sigma_noise=None) -
    -
    -

    Compute the Laplace approximation to the log marginal likelihood subject -to specific Hessian approximations that subclasses implement. -Requires that the Laplace approximation has been fit before. -The resulting torch.Tensor is differentiable in prior_precision and -sigma_noise if these have gradients enabled. -By passing prior_precision or sigma_noise, the current value is -overwritten. This is useful for iterating on the log marginal likelihood.

    -

    Parameters

    -
    -
    prior_precision : torch.Tensor, optional
    -
    prior precision if should be changed from current prior_precision value
    -
    sigma_noise : [type], optional
    -
    observation noise standard deviation if should be changed
    -
    -

    Returns

    -
    -
    log_marglik : torch.Tensor
    -
     
    -
    +
    -
    -def predictive_samples(self, x, pred_type='glm', n_samples=100) -
    +
    var Kinv
    -

    Sample from the posterior predictive on input data x. -Can be used, for example, for Thompson sampling.

    -

    Parameters

    -
    -
    x : torch.Tensor
    -
    input data (batch_size, input_shape)
    -
    pred_type : {'glm', 'nn'}, default='glm'
    -
    type of posterior predictive, linearized GLM predictive or neural -network sampling predictive. The GLM predictive is consistent with -the curvature approximations used here.
    -
    n_samples : int
    -
    number of samples
    -
    -

    Returns

    -
    -
    samples : torch.Tensor
    -
    samples (n_samples, batch_size, output_shape)
    -
    +
    -
    -def functional_variance(self, Jacs) -
    +
    var posterior_precision
    -

    Compute functional variance for the 'glm' predictive: -f_var[i] = Jacs[i] @ P.inv() @ Jacs[i].T, which is a output x output -predictive covariance matrix. -Mathematically, we have for a single Jacobian -\mathcal{J} = \nabla_\theta f(x;\theta)\vert_{\theta_{MAP}} -the output covariance matrix - \mathcal{J} P^{-1} \mathcal{J}^T .

    -

    Parameters

    -
    -
    Jacs : torch.Tensor
    -
    Jacobians of model output wrt parameters -(batch, outputs, parameters)
    -
    +

    Return correctly scaled posterior precision that would be constructed +as H[0] @ diag(H[1]) @ H[0].T + self.prior_precision_diag.

    Returns

    -
    f_var : torch.Tensor
    -
    output covariance (batch, outputs, outputs)
    -
    -
    -
    -def sample(self, n_samples=100) -
    -
    -

    Sample from the Laplace posterior approximation, i.e., - \theta \sim \mathcal{N}(\theta_{MAP}, P^{-1}).

    -

    Parameters

    -
    -
    n_samples : int, default=100
    -
    number of samples
    +
    H : tuple(eigenvectors, eigenvalues)
    +
    scaled self.H with temperature and loss factors.
    +
    prior_precision_diag : torch.Tensor
    +
diagonal prior precision of shape (parameters,) to be added to H.
    -
    -def optimize_prior_precision(self, method='marglik', pred_type='glm', n_steps=100, lr=0.1, init_prior_prec=1.0, val_loader=None, loss=<function get_nll>, log_prior_prec_min=-4, log_prior_prec_max=4, grid_size=100, link_approx='probit', n_samples=100, verbose=False, cv_loss_with_var=False) -
    -
    -
    -

    Inherited members

    @@ -603,18 +717,11 @@

    FullLaplace

    - -
  • -

    KronLaplace

    -
  • -
  • -

    DiagLaplace

    -
  • -
  • ParametricLaplace

  • +
  • +

    FullLaplace

    +
  • +
  • +

    KronLaplace

    +
  • +
  • +

    DiagLaplace

    +
  • +
  • +

    LowRankLaplace

    +
  • diff --git a/docs/curvature/asdl.html b/docs/curvature/asdl.html index 23b40a34..ecba76dd 100644 --- a/docs/curvature/asdl.html +++ b/docs/curvature/asdl.html @@ -35,7 +35,7 @@

    Classes

    class AsdlInterface -(model, likelihood, last_layer=False) +(model, likelihood, last_layer=False, subnetwork_indices=None)

    Interface for asdfghjkl backend.

    @@ -47,19 +47,18 @@

    Subclasses

    -

    Static methods

    +

    Methods

    -def jacobians(model, x) +def jacobians(self, x)

    Compute Jacobians \nabla_\theta f(x;\theta) at current parameter \theta using asdfghjkl's gradient per output dimension.

    Parameters

    -
    model : torch.nn.Module
    -
     
    x : torch.Tensor
    input data (batch, input_shape) on compatible device with model.
    @@ -71,9 +70,6 @@

    Returns

    output function (batch, outputs)
    -
    -

    Methods

    -
    def gradients(self, x, y)
    @@ -108,9 +104,43 @@

    Inherited members

    +
    +class AsdlHessian +(model, likelihood, last_layer=False, low_rank=10) +
    +
    +

    Interface for asdfghjkl backend.

    +

    Ancestors

    + +

    Methods

    +
    +
    +def eig_lowrank(self, data_loader) +
    +
    +
    +
    +
    +

    Inherited members

    + +
    class AsdlGGN -(model, likelihood, last_layer=False, stochastic=False) +(model, likelihood, last_layer=False, subnetwork_indices=None, stochastic=False)

    Implementation of the GGNInterface using asdfghjkl.

    @@ -184,6 +214,12 @@

    AsdlHessian

    + + +
  • AsdlGGN

  • diff --git a/docs/curvature/backpack.html b/docs/curvature/backpack.html index 0e610d54..1ae69561 100644 --- a/docs/curvature/backpack.html +++ b/docs/curvature/backpack.html @@ -35,7 +35,7 @@

    Classes

    class BackPackInterface -(model, likelihood, last_layer=False) +(model, likelihood, last_layer=False, subnetwork_indices=None)

    Interface for Backpack backend.

    @@ -48,18 +48,16 @@

    Subclasses

  • BackPackEF
  • BackPackGGN
  • -

    Static methods

    +

    Methods

    -def jacobians(model, x) +def jacobians(self, x)

    Compute Jacobians \nabla_{\theta} f(x;\theta) at current parameter \theta using backpack's BatchGrad per output dimension.

    Parameters

    -
    model : torch.nn.Module
    -
     
    x : torch.Tensor
    input data (batch, input_shape) on compatible device with model.
    @@ -71,9 +69,6 @@

    Returns

    output function (batch, outputs)
    -
    -

    Methods

    -
    def gradients(self, x, y)
    @@ -110,7 +105,7 @@

    Inherited members

  • class BackPackGGN -(model, likelihood, last_layer=False, stochastic=False) +(model, likelihood, last_layer=False, subnetwork_indices=None, stochastic=False)

    Implementation of the GGNInterface using Backpack.

    @@ -136,7 +131,7 @@

    Inherited members

    class BackPackEF -(model, likelihood, last_layer=False) +(model, likelihood, last_layer=False, subnetwork_indices=None)

    Implementation of EFInterface using Backpack.

    diff --git a/docs/curvature/curvature.html b/docs/curvature/curvature.html index 084432df..645baae7 100644 --- a/docs/curvature/curvature.html +++ b/docs/curvature/curvature.html @@ -35,7 +35,7 @@

    Classes

    class CurvatureInterface -(model, likelihood, last_layer=False) +(model, likelihood, last_layer=False, subnetwork_indices=None)

    Interface to access curvature for a model and corresponding likelihood. @@ -45,12 +45,15 @@

    Classes

    structures, for example, a block-diagonal one.

    Parameters

    -
    model : torch.nn.Module or FeatureExtractor
    +
    model : torch.nn.Module or FeatureExtractor
    torch model (neural network)
    likelihood : {'classification', 'regression'}
     
    last_layer : bool, default=False
    only consider curvature of last layer
    +
    subnetwork_indices : torch.Tensor, default=None
    +
    indices of the vectorized model parameters that define the subnetwork +to apply the Laplace approximation over

    Attributes

    @@ -67,17 +70,15 @@

    Subclasses

  • EFInterface
  • GGNInterface
  • -

    Static methods

    +

    Methods

    -def jacobians(model, x) +def jacobians(self, x)

    Compute Jacobians \nabla_\theta f(x;\theta) at current parameter \theta.

    Parameters

    -
    model : torch.nn.Module
    -
     
    x : torch.Tensor
    input data (batch, input_shape) on compatible device with model.
    @@ -90,15 +91,13 @@

    Returns

    -def last_layer_jacobians(model, x) +def last_layer_jacobians(self, x)

    Compute Jacobians \nabla_{\theta_\textrm{last}} f(x;\theta_\textrm{last}) only at current last-layer parameter \theta_{\textrm{last}}.

    Parameters

    -
    model : FeatureExtractor
    -
     
    x : torch.Tensor
     
    @@ -110,9 +109,6 @@

    Returns

    output function (batch, outputs)
    -
    -

    Methods

    -
    def gradients(self, x, y)
    @@ -175,7 +171,7 @@

    Returns

    loss : torch.Tensor
     
    -
    H : Kron
    +
    H : Kron
    Kronecker factored Hessian approximation.
    @@ -204,7 +200,7 @@

    Returns

    class GGNInterface -(model, likelihood, last_layer=False, stochastic=False) +(model, likelihood, last_layer=False, subnetwork_indices=None, stochastic=False)

    Generalized Gauss-Newton or Fisher Curvature Interface. @@ -212,12 +208,15 @@

    Returns

    In addition to CurvatureInterface, methods for Jacobians are required by subclasses.

    Parameters

    -
    model : torch.nn.Module or FeatureExtractor
    +
    model : torch.nn.Module or FeatureExtractor
    torch model (neural network)
    likelihood : {'classification', 'regression'}
     
    last_layer : bool, default=False
    only consider curvature of last layer
    +
    subnetwork_indices : torch.Tensor, default=None
    +
    indices of the vectorized model parameters that define the subnetwork +to apply the Laplace approximation over
    stochastic : bool, default=False
    Fisher if stochastic else GGN
    @@ -270,19 +269,22 @@

    Inherited members

    class EFInterface -(model, likelihood, last_layer=False) +(model, likelihood, last_layer=False, subnetwork_indices=None)

    Interface for Empirical Fisher as Hessian approximation. In addition to CurvatureInterface, methods for gradients are required by subclasses.

    Parameters

    -
    model : torch.nn.Module or FeatureExtractor
    +
    model : torch.nn.Module or FeatureExtractor
    torch model (neural network)
    likelihood : {'classification', 'regression'}
     
    last_layer : bool, default=False
    only consider curvature of last layer
    +
    subnetwork_indices : torch.Tensor, default=None
    +
    indices of the vectorized model parameters that define the subnetwork +to apply the Laplace approximation over

    Attributes

    diff --git a/docs/curvature/index.html b/docs/curvature/index.html index 72e1203b..00001e2f 100644 --- a/docs/curvature/index.html +++ b/docs/curvature/index.html @@ -50,7 +50,7 @@

    Classes

    class CurvatureInterface -(model, likelihood, last_layer=False) +(model, likelihood, last_layer=False, subnetwork_indices=None)

    Interface to access curvature for a model and corresponding likelihood. @@ -60,12 +60,15 @@

    Classes

    structures, for example, a block-diagonal one.

    Parameters

    -
    model : torch.nn.Module or FeatureExtractor
    +
    model : torch.nn.Module or FeatureExtractor
    torch model (neural network)
    likelihood : {'classification', 'regression'}
     
    last_layer : bool, default=False
    only consider curvature of last layer
    +
    subnetwork_indices : torch.Tensor, default=None
    +
    indices of the vectorized model parameters that define the subnetwork +to apply the Laplace approximation over

    Attributes

    @@ -82,17 +85,15 @@

    Subclasses

  • EFInterface
  • GGNInterface
  • -

    Static methods

    +

    Methods

    -def jacobians(model, x) +def jacobians(self, x)

    Compute Jacobians \nabla_\theta f(x;\theta) at current parameter \theta.

    Parameters

    -
    model : torch.nn.Module
    -
     
    x : torch.Tensor
    input data (batch, input_shape) on compatible device with model.
    @@ -105,15 +106,13 @@

    Returns

    -def last_layer_jacobians(model, x) +def last_layer_jacobians(self, x)

    Compute Jacobians \nabla_{\theta_\textrm{last}} f(x;\theta_\textrm{last}) only at current last-layer parameter \theta_{\textrm{last}}.

    Parameters

    -
    model : FeatureExtractor
    -
     
    x : torch.Tensor
     
    @@ -125,9 +124,6 @@

    Returns

    output function (batch, outputs)
    -
    -

    Methods

    -
    def gradients(self, x, y)
    @@ -190,7 +186,7 @@

    Returns

    loss : torch.Tensor
     
    -
    H : Kron
    +
    H : Kron
    Kronecker factored Hessian approximation.
    @@ -219,7 +215,7 @@

    Returns

    class GGNInterface -(model, likelihood, last_layer=False, stochastic=False) +(model, likelihood, last_layer=False, subnetwork_indices=None, stochastic=False)

    Generalized Gauss-Newton or Fisher Curvature Interface. @@ -227,12 +223,15 @@

    Returns

    In addition to CurvatureInterface, methods for Jacobians are required by subclasses.

    Parameters

    -
    model : torch.nn.Module or FeatureExtractor
    +
    model : torch.nn.Module or FeatureExtractor
    torch model (neural network)
    likelihood : {'classification', 'regression'}
     
    last_layer : bool, default=False
    only consider curvature of last layer
    +
    subnetwork_indices : torch.Tensor, default=None
    +
    indices of the vectorized model parameters that define the subnetwork +to apply the Laplace approximation over
    stochastic : bool, default=False
    Fisher if stochastic else GGN
    @@ -285,19 +284,22 @@

    Inherited members

    class EFInterface -(model, likelihood, last_layer=False) +(model, likelihood, last_layer=False, subnetwork_indices=None)

    Interface for Empirical Fisher as Hessian approximation. In addition to CurvatureInterface, methods for gradients are required by subclasses.

    Parameters

    -
    model : torch.nn.Module or FeatureExtractor
    +
    model : torch.nn.Module or FeatureExtractor
    torch model (neural network)
    likelihood : {'classification', 'regression'}
     
    last_layer : bool, default=False
    only consider curvature of last layer
    +
    subnetwork_indices : torch.Tensor, default=None
    +
    indices of the vectorized model parameters that define the subnetwork +to apply the Laplace approximation over

    Attributes

    @@ -356,7 +358,7 @@

    Inherited members

    class BackPackInterface -(model, likelihood, last_layer=False) +(model, likelihood, last_layer=False, subnetwork_indices=None)

    Interface for Backpack backend.

    @@ -369,18 +371,16 @@

    Subclasses

  • BackPackEF
  • BackPackGGN
  • -

    Static methods

    +

    Methods

    -def jacobians(model, x) +def jacobians(self, x)

    Compute Jacobians \nabla_{\theta} f(x;\theta) at current parameter \theta using backpack's BatchGrad per output dimension.

    Parameters

    -
    model : torch.nn.Module
    -
     
    x : torch.Tensor
    input data (batch, input_shape) on compatible device with model.
    @@ -392,9 +392,6 @@

    Returns

    output function (batch, outputs)
    -
    -

    Methods

    -
    def gradients(self, x, y)
    @@ -431,7 +428,7 @@

    Inherited members

    class BackPackGGN -(model, likelihood, last_layer=False, stochastic=False) +(model, likelihood, last_layer=False, subnetwork_indices=None, stochastic=False)

    Implementation of the GGNInterface using Backpack.

    @@ -457,7 +454,7 @@

    Inherited members

    class BackPackEF -(model, likelihood, last_layer=False) +(model, likelihood, last_layer=False, subnetwork_indices=None)

    Implementation of EFInterface using Backpack.

    @@ -483,7 +480,7 @@

    Inherited members

    class AsdlInterface -(model, likelihood, last_layer=False) +(model, likelihood, last_layer=False, subnetwork_indices=None)

    Interface for asdfghjkl backend.

    @@ -495,19 +492,18 @@

    Subclasses

    -

    Static methods

    +

    Methods

    -def jacobians(model, x) +def jacobians(self, x)

    Compute Jacobians \nabla_\theta f(x;\theta) at current parameter \theta using asdfghjkl's gradient per output dimension.

    Parameters

    -
    model : torch.nn.Module
    -
     
    x : torch.Tensor
    input data (batch, input_shape) on compatible device with model.
    @@ -519,9 +515,6 @@

    Returns

    output function (batch, outputs)
    -
    -

    Methods

    -
    def gradients(self, x, y)
    @@ -558,7 +551,7 @@

    Inherited members

    class AsdlGGN -(model, likelihood, last_layer=False, stochastic=False) +(model, likelihood, last_layer=False, subnetwork_indices=None, stochastic=False)

    Implementation of the GGNInterface using asdfghjkl.

    @@ -608,6 +601,40 @@

    Inherited members

    +
    +class AsdlHessian +(model, likelihood, last_layer=False, low_rank=10) +
    +
    +

    Interface for asdfghjkl backend.

    +

    Ancestors

    + +

    Methods

    +
    +
    +def eig_lowrank(self, data_loader) +
    +
    +
    +
    +
    +

    Inherited members

    + +
    @@ -680,6 +707,12 @@

    AsdlEF

    +
  • +

    AsdlHessian

    + +
  • diff --git a/docs/index.html b/docs/index.html index 4f80e44c..2819a3ce 100644 --- a/docs/index.html +++ b/docs/index.html @@ -29,15 +29,15 @@

    Package laplace

    Laplace

    Main

    -

    The laplace package facilitates the application of Laplace approximations for entire neural networks or just their last layer. +

    The laplace package facilitates the application of Laplace approximations for entire neural networks, subnetworks of neural networks, or just their last layer. The package enables posterior approximations, marginal-likelihood estimation, and various posterior predictive computations. The library documentation is available at https://aleximmer.github.io/Laplace.

    There is also a corresponding paper, Laplace Redux — Effortless Bayesian Deep Learning, which introduces the library, provides an introduction to the Laplace approximation, reviews its use in deep learning, and empirically demonstrates its versatility and competitiveness. Please consider referring to the paper when using our library:

    -
    @article{daxberger2021laplace,
    -  title={Laplace Redux--Effortless Bayesian Deep Learning},
    -  author={Daxberger, Erik and Kristiadi, Agustinus and Immer, Alexander
    -          and Eschenhagen, Runa and Bauer, Matthias and Hennig, Philipp},
    -  journal={arXiv preprint arXiv:2106.14806},
    +
    @inproceedings{laplace2021,
    +  title={Laplace Redux--Effortless {B}ayesian Deep Learning},
    +  author={Erik Daxberger and Agustinus Kristiadi and Alexander Immer 
    +          and Runa Eschenhagen and Matthias Bauer and Philipp Hennig},
    +  booktitle={{N}eur{IPS}},
       year={2021}
     }
     
    @@ -56,34 +56,39 @@

    Setup

    Structure

    The laplace package consists of two main components:

      -
    1. The subclasses of laplace.BaseLaplace that implement different sparsity structures: different subsets of weights ('all' and 'last_layer') and different structures of the Hessian approximation ('full', 'kron', and 'diag'). This results in six currently available options: FullLaplace, KronLaplace, DiagLaplace, and the corresponding last-layer variations FullLLLaplace, KronLLLaplace, -and DiagLLLaplace, which are all subclasses of laplace.LLLaplace. All of these can be conveniently accessed via the laplace.Laplace function.
    2. +
    3. The subclasses of laplace.BaseLaplace that implement different sparsity structures: different subsets of weights ('all', 'subnetwork' and 'last_layer') and different structures of the Hessian approximation ('full', 'kron', 'lowrank' and 'diag'). This results in eight currently available options: FullLaplace, KronLaplace, DiagLaplace, the corresponding last-layer variations FullLLLaplace, KronLLLaplace, +and DiagLLLaplace (which are all subclasses of laplace.LLLaplace), laplace.SubnetLaplace (which only supports a 'full' Hessian approximation) and LowRankLaplace (which only supports inference over 'all' weights). All of these can be conveniently accessed via the laplace.Laplace function.
    4. The backends in laplace.curvature which provide access to Hessian approximations of the corresponding sparsity structures, for example, the diagonal GGN.

    Additionally, the package provides utilities for -decomposing a neural network into feature extractor and last layer for LLLaplace subclasses (laplace.feature_extractor) +decomposing a neural network into feature extractor and last layer for LLLaplace subclasses (laplace.utils.feature_extractor) and -effectively dealing with Kronecker factors (laplace.matrix).

    +effectively dealing with Kronecker factors (laplace.utils.matrix).

    +

    Finally, the package implements several options to select/specify a subnetwork for SubnetLaplace (as subclasses of laplace.utils.subnetmask.SubnetMask). +Automatic subnetwork selection strategies include: uniformly at random (RandomSubnetMask), by largest parameter magnitudes (LargestMagnitudeSubnetMask), and by largest marginal parameter variances (LargestVarianceDiagLaplaceSubnetMask and LargestVarianceSWAGSubnetMask). +In addition to that, subnetworks can also be specified manually, by listing the names of either the model parameters (ParamNameSubnetMask) or modules (ModuleNameSubnetMask) to perform Laplace inference over.

    Extendability

    To extend the laplace package, new BaseLaplace subclasses can be designed, for example, -a block-diagonal structure or subset-of-weights Laplace. -Alternatively, extending or integrating backends (subclasses of curvature.curvature) allows to provide different Hessian +Laplace with a block-diagonal Hessian structure. +One can also implement custom subnetwork selection strategies as new subclasses of SubnetMask.

    +

Alternatively, extending or integrating backends (subclasses of curvature.curvature) makes it possible to provide different Hessian approximations to the Laplace approximations. For example, currently the curvature.BackPackInterface based on BackPACK and curvature.AsdlInterface based on ASDL are available. The AsdlInterface provides a Kronecker-factored empirical Fisher while the BackPackInterface does not, and only the BackPackInterface provides access to Hessian approximations for a regression (MSELoss) loss function.

    Example usage

    -

    Post-hoc prior precision tuning of last-layer LA

    +

    Post-hoc prior precision tuning of diagonal LA

    In the following example, a pre-trained model is loaded, -then the Laplace approximation is fit to the training data, +then the Laplace approximation is fit to the training data +(using a diagonal Hessian approximation over all parameters), and the prior precision is optimized with cross-validation 'CV'. After that, the resulting LA is used for prediction with the 'probit' predictive for classification.

    from laplace import Laplace
     
    -# pre-trained model
    +# Pre-trained model
     model = load_map_model()  
     
     # User-specified LA flavor
    @@ -97,7 +102,7 @@ 

    Post-hoc prio pred = la(x, link_approx='probit')

    Differentiating the log marginal likelihood w.r.t. hyperparameters

    -

    The marginal likelihood can be used for model selection and is differentiable +

    The marginal likelihood can be used for model selection [10] and is differentiable for continuous hyperparameters like the prior precision or observation noise. Here, we fit the library default, KFAC last-layer LA and differentiate the log marginal likelihood.

    @@ -114,6 +119,41 @@

    Differe ml = la.log_marginal_likelihood(prior_prec, obs_noise) ml.backward()
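In practice, this is typically wrapped in a gradient-based optimization of the log-hyperparameters, along the following lines (a sketch; the number of steps and the learning rate are illustrative):

import torch

log_prior, log_sigma = torch.zeros(1, requires_grad=True), torch.zeros(1, requires_grad=True)
hyper_optimizer = torch.optim.Adam([log_prior, log_sigma], lr=1e-1)
for _ in range(100):
    hyper_optimizer.zero_grad()
    # exponentiate so the optimized hyperparameters stay positive
    neg_marglik = -la.log_marginal_likelihood(log_prior.exp(), log_sigma.exp())
    neg_marglik.backward()
    hyper_optimizer.step()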

    +

    Applying the LA over only a subset of the model parameters

    +

    This example shows how to fit the Laplace approximation over only +a subnetwork within a neural network (while keeping all other parameters +fixed at their MAP estimates), as proposed in [11]. It also exemplifies +different ways to specify the subnetwork to perform inference over.

    +
    from laplace import Laplace
    +
    +# Pre-trained model
    +model = load_model()
    +
    +# Examples of different ways to specify the subnetwork
    +# via indices of the vectorized model parameters
    +#
    +# Example 1: select the 128 parameters with the largest magnitude
    +from laplace.utils import LargestMagnitudeSubnetMask
    +subnetwork_mask = LargestMagnitudeSubnetMask(model, n_params_subnet=128)
    +subnetwork_indices = subnetwork_mask.select()
    +
    +# Example 2: specify the layers that define the subnetwork
    +from laplace.utils import ModuleNameSubnetMask
    +subnetwork_mask = ModuleNameSubnetMask(model, module_names=['layer.1', 'layer.3'])
    +subnetwork_mask.select()
    +subnetwork_indices = subnetwork_mask.indices
    +
    +# Example 3: manually define the subnetwork via custom subnetwork indices
    +import torch
    +subnetwork_indices = torch.tensor([0, 4, 11, 42, 123, 2021])
    +
    +# Define and fit subnetwork LA using the specified subnetwork indices
    +la = Laplace(model, 'classification',
    +             subset_of_weights='subnetwork',
    +             hessian_structure='full',
    +             subnetwork_indices=subnetwork_indices)
    +la.fit(train_loader)
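+
+# Once fitted, the subnetwork LA is used for prediction exactly like the
+# other flavors, e.g. with the probit predictive shown earlier (sketch):
+pred = la(x, link_approx='probit')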
    +

    Documentation

    The documentation is available here or can be generated and/or viewed locally:

    # assuming the repository was cloned
    @@ -124,7 +164,7 @@ 

    Documentation

    pdoc --http 0.0.0.0:8080 laplace --template-dir template

    References

    -

    This package relies on various improvements to the Laplace approximation for neural networks, which was originally due to MacKay [1].

    +

    This package relies on various improvements to the Laplace approximation for neural networks, which was originally due to MacKay [1]. Please consider citing the respective papers if you use any of their proposed methods via our laplace library.

    Full example: Optimization of the marginal likelihood and prediction

    Sinusoidal toy data

    @@ -326,10 +368,6 @@

    Sub-modules

    -
    laplace.feature_extractor
    -
    -
    -
    laplace.laplace
    @@ -338,11 +376,11 @@

    Sub-modules

    -
    laplace.matrix
    +
    laplace.subnetlaplace
    -
    laplace.utils
    +
    laplace.utils
    @@ -364,9 +402,9 @@

    Parameters

     
    likelihood : {'classification', 'regression'}
     
    -
    subset_of_weights : {'last_layer', 'all'}, default='last_layer'
    +
    subset_of_weights : {'last_layer', 'subnetwork', 'all'}, default='last_layer'
    subset of weights to consider for inference
    -
    hessian_structure : {'diag', 'kron', 'full'}, default='kron'
    +
    hessian_structure : {'diag', 'kron', 'full', 'lowrank'}, default='kron'
    structure of the Hessian approximation

    Returns

    @@ -636,7 +674,8 @@

    Subclasses

  • DiagLaplace
  • FullLaplace
  • KronLaplace
  • -
  • laplace.lllaplace.LLLaplace
  • +
  • LowRankLaplace
  • +
  • LLLaplace
  • Instance variables

    @@ -697,7 +736,7 @@

    Returns

    Methods

    -def fit(self, train_loader) +def fit(self, train_loader, override=True)

    Fit the local Laplace approximation at the parameters of the model.

    @@ -706,6 +745,45 @@

    Parameters

    train_loader : torch.data.utils.DataLoader
    each iterate is a training batch (X, y); train_loader.dataset needs to be set to access N, size of the data set
    +
    override : bool, default=True
    +
    whether to initialize H, loss, and n_data again; setting to False is useful for +online learning settings to accumulate a sequential posterior approximation.
    +
    + +
    +def square_norm(self, value) +
    +
    +

Compute the squared norm under the posterior precision with \Delta = value - self.mean:

\Delta^\top P \Delta

Returns

    +
    +
    +
    square_form
    +
     
    +
    +
    +
    +def log_prob(self, value, normalized=True) +
    +
    +

    Compute the log probability under the (current) Laplace approximation.

    +

    Parameters

    +
    +
    normalized : bool, default=True
    +
    whether to return log of a properly normalized Gaussian or just the +terms that depend on value.
    +
    +

    Returns

    +
    +
    log_prob : torch.Tensor
    +
     
    @@ -826,6 +904,7 @@

    Ancestors

    Subclasses

    Instance variables

    @@ -869,11 +948,13 @@

    Inherited members

  • log_det_ratio
  • log_likelihood
  • log_marginal_likelihood
  • +
  • log_prob
  • optimize_prior_precision_base
  • predictive_samples
  • prior_precision_diag
  • sample
  • scatter
  • +
  • square_norm
  • @@ -888,7 +969,7 @@

    Inherited members

    Mathematically, we have for each parameter group, e.g., torch.nn.Module, that \P\approx Q \otimes H. See BaseLaplace for the full interface and see -Kron and KronDecomposed for the structure of +Kron and KronDecomposed for the structure of the Kronecker factors. Kron is used to aggregate factors by summing up and KronDecomposed is used to add the prior, a Hessian factor (e.g. temperature), and computing posterior covariances, marginal likelihood, etc. @@ -909,7 +990,7 @@

    Instance variables

    Kronecker factored Posterior precision P.

    Returns

    -
    precision : KronDecomposed
    +
    precision : KronDecomposed
     
    @@ -929,11 +1010,13 @@

    Inherited members

  • log_det_ratio
  • log_likelihood
  • log_marginal_likelihood
  • +
  • log_prob
  • optimize_prior_precision_base
  • predictive_samples
  • prior_precision_diag
  • sample
  • scatter
  • +
  • square_norm
  • @@ -997,11 +1080,80 @@

    Inherited members

  • log_det_ratio
  • log_likelihood
  • log_marginal_likelihood
  • +
  • log_prob
  • optimize_prior_precision_base
  • predictive_samples
  • prior_precision_diag
  • sample
  • scatter
  • +
  • square_norm
  • + + + + +
    +class LowRankLaplace +(model, likelihood, sigma_noise=1, prior_precision=1, prior_mean=0, temperature=1, backend=laplace.curvature.asdl.AsdlHessian, backend_kwargs=None) +
    +
    +

Laplace approximation with low-rank log likelihood Hessian (approximation). The low-rank matrix is represented by an eigendecomposition (vecs, values). Based on the chosen backend, either a true Hessian or, for example, a GGN approximation can be used. The posterior precision is computed as P = V diag(l) V^T + P_0. To sample, compute the functional variance, and compute the log determinant, algebraic tricks are used to reduce the cost of inversion to that of a K \times K matrix when the rank is K.

    +

    See BaseLaplace for the full interface.

    +

    Ancestors

    + +

    Instance variables

    +
    +
    var V
    +
    +
    +
    +
    var Kinv
    +
    +
    +
    +
    var posterior_precision
    +
    +

    Return correctly scaled posterior precision that would be constructed +as H[0] @ diag(H[1]) @ H[0].T + self.prior_precision_diag.

    +

    Returns

    +
    +
    H : tuple(eigenvectors, eigenvalues)
    +
    scaled self.H with temperature and loss factors.
    +
    prior_precision_diag : torch.Tensor
    +
diagonal prior precision of shape (parameters,) to be added to H.
    +
    +
    +
    +

    Inherited members

    + @@ -1035,7 +1187,7 @@

    Inherited members

    all cases P_0 = \textrm{diag}(p_0) and the structure of p_0 can be varied.

    Parameters

    -
    model : torch.nn.Module or FeatureExtractor
    +
    model : torch.nn.Module or FeatureExtractor
     
    likelihood : {'classification', 'regression'}
    determines the log likelihood Hessian approximation
    @@ -1092,11 +1244,13 @@

    Inherited members

  • log_det_ratio
  • log_likelihood
  • log_marginal_likelihood
  • +
  • log_prob
  • optimize_prior_precision_base
  • posterior_precision
  • predictive_samples
  • sample
  • scatter
  • +
  • square_norm
  • @@ -1113,30 +1267,36 @@

    Inherited members

    See FullLaplace, LLLaplace, and BaseLaplace for the full interface.

    Ancestors

    Inherited members

    @@ -1151,35 +1311,37 @@

    Inherited members

    Mathematically, we have for the last parameter group, i.e., torch.nn.Linear, that \P\approx Q \otimes H. See KronLaplace, LLLaplace, and BaseLaplace for the full interface and see -Kron and KronDecomposed for the structure of +Kron and KronDecomposed for the structure of the Kronecker factors. Kron is used to aggregate factors by summing up and KronDecomposed is used to add the prior, a Hessian factor (e.g. temperature), and computing posterior covariances, marginal likelihood, etc. Use of damping is possible by initializing or setting damping=True.

    Ancestors

    Inherited members

    @@ -1195,30 +1357,143 @@

    Inherited members

    See DiagLaplace, LLLaplace, and BaseLaplace for the full interface.

    Ancestors

    Inherited members

    +
    +
    +class SubnetLaplace +(model, likelihood, subnetwork_indices, sigma_noise=1.0, prior_precision=1.0, prior_mean=0.0, temperature=1.0, backend=laplace.curvature.backpack.BackPackGGN, backend_kwargs=None) +
    +
    +

    Class for subnetwork Laplace, which computes the Laplace approximation over +just a subset of the model parameters (i.e. a subnetwork within the neural network), +as proposed in [1]. Subnetwork Laplace only supports a full Hessian approximation; other +approximations could be used in theory, but would not make as much sense conceptually.

    +

    A Laplace approximation is represented by a MAP which is given by the +model parameter and a posterior precision or covariance specifying +a Gaussian distribution \mathcal{N}(\theta_{MAP}, P^{-1}). +Here, only a subset of the model parameters (i.e. a subnetwork of the +neural network) are treated probabilistically. +The goal of this class is to compute the posterior precision P +which sums as + +P = \sum_{n=1}^N \nabla^2_\theta \log p(\mathcal{D}_n \mid \theta) +\vert_{\theta_{MAP}} + \nabla^2_\theta \log p(\theta) \vert_{\theta_{MAP}}. + +The prior is assumed to be Gaussian and therefore we have a simple form for +\nabla^2_\theta \log p(\theta) \vert_{\theta_{MAP}} = P_0 . +In particular, we assume a scalar or diagonal prior precision so that in +all cases P_0 = \textrm{diag}(p_0) and the structure of p_0 can be varied.

    +

    The subnetwork Laplace approximation only supports a full, i.e., dense, log likelihood +Hessian approximation and hence posterior precision. +Based on the chosen backend +parameter, the full approximation can be, for example, a generalized Gauss-Newton +matrix. +Mathematically, we have P \in \mathbb{R}^{P \times P}. +See FullLaplace and BaseLaplace for the full interface.

    +

    References

    +

    [1] Daxberger, E., Nalisnick, E., Allingham, JU., Antorán, J., Hernández-Lobato, JM. +Bayesian Deep Learning via Subnetwork Inference. +ICML 2021.

    +

    Parameters

    +
    +
    model : torch.nn.Module or FeatureExtractor
    +
     
    +
    likelihood : {'classification', 'regression'}
    +
    determines the log likelihood Hessian approximation
    +
    subnetwork_indices : torch.LongTensor
    +
    indices of the vectorized model parameters +(i.e. torch.nn.utils.parameters_to_vector(model.parameters())) +that define the subnetwork to apply the Laplace approximation over
    +
    sigma_noise : torch.Tensor or float, default=1
    +
    observation noise for the regression setting; must be 1 for classification
    +
    prior_precision : torch.Tensor or float, default=1
    +
    prior precision of a Gaussian prior (= weight decay); +can be scalar, per-layer, or diagonal in the most general case
    +
    prior_mean : torch.Tensor or float, default=0
    +
    prior mean of a Gaussian prior, useful for continual learning
    +
    temperature : float, default=1
    +
    temperature of the likelihood; lower temperature leads to more +concentrated posterior and vice versa.
    +
    backend : subclasses of CurvatureInterface
    +
    backend for access to curvature/Hessian approximations
    +
    backend_kwargs : dict, default=None
    +
    arguments passed to the backend on initialization, for example to +set the number of MC samples for stochastic approximations.
    +
    +

    Ancestors

    + +

    Instance variables

    +
    +
    var prior_precision_diag
    +
    +

    Obtain the diagonal prior precision p_0 constructed from either +a scalar or diagonal prior precision.

    +

    Returns

    +
    +
    prior_precision_diag : torch.Tensor
    +
     
    +
    +
    +
    +

    Inherited members

    + @@ -1234,8 +1509,9 @@

    Index

  • Structure
  • Extendability
  • Example usage
  • Documentation
  • @@ -1262,11 +1538,10 @@

    Index

  • Functions

    @@ -1290,6 +1565,8 @@

    BaseLaplace

    ParametricLaplace

  • diff --git a/docs/laplace.html b/docs/laplace.html index d72602d6..99dae2b8 100644 --- a/docs/laplace.html +++ b/docs/laplace.html @@ -42,9 +42,9 @@

    Parameters

     
    likelihood : {'classification', 'regression'}
     
    -
    subset_of_weights : {'last_layer', 'all'}, default='last_layer'
    +
    subset_of_weights : {'last_layer', 'subnetwork', 'all'}, default='last_layer'
    subset of weights to consider for inference
    -
    hessian_structure : {'diag', 'kron', 'full'}, default='kron'
    +
    hessian_structure : {'diag', 'kron', 'full', 'lowrank'}, default='kron'
    structure of the Hessian approximation

    Returns

    diff --git a/docs/lllaplace.html b/docs/lllaplace.html index 108e9b0b..6ea940b2 100644 --- a/docs/lllaplace.html +++ b/docs/lllaplace.html @@ -33,6 +33,103 @@

    Module laplace.lllaplace

    Classes

    +
    +class LLLaplace +(model, likelihood, sigma_noise=1.0, prior_precision=1.0, prior_mean=0.0, temperature=1.0, backend=laplace.curvature.backpack.BackPackGGN, last_layer_name=None, backend_kwargs=None) +
    +
    +

    Baseclass for all last-layer Laplace approximations in this library. +Subclasses specify the structure of the Hessian approximation. +See BaseLaplace for the full interface.

    +

    A Laplace approximation is represented by a MAP which is given by the +model parameter and a posterior precision or covariance specifying +a Gaussian distribution \mathcal{N}(\theta_{MAP}, P^{-1}). +Here, only the parameters of the last layer of the neural network +are treated probabilistically. +The goal of this class is to compute the posterior precision P +which sums as + +P = \sum_{n=1}^N \nabla^2_\theta \log p(\mathcal{D}_n \mid \theta) +\vert_{\theta_{MAP}} + \nabla^2_\theta \log p(\theta) \vert_{\theta_{MAP}}. + +Every subclass implements different approximations to the log likelihood Hessians, +for example, a diagonal one. The prior is assumed to be Gaussian and therefore we have +a simple form for \nabla^2_\theta \log p(\theta) \vert_{\theta_{MAP}} = P_0 . +In particular, we assume a scalar or diagonal prior precision so that in +all cases P_0 = \textrm{diag}(p_0) and the structure of p_0 can be varied.

    +

    Parameters

    +
    +
    model : torch.nn.Module or FeatureExtractor
    +
     
    +
    likelihood : {'classification', 'regression'}
    +
    determines the log likelihood Hessian approximation
    +
    sigma_noise : torch.Tensor or float, default=1
    +
    observation noise for the regression setting; must be 1 for classification
    +
    prior_precision : torch.Tensor or float, default=1
    +
    prior precision of a Gaussian prior (= weight decay); +can be scalar, per-layer, or diagonal in the most general case
    +
    prior_mean : torch.Tensor or float, default=0
    +
    prior mean of a Gaussian prior, useful for continual learning
    +
    temperature : float, default=1
    +
    temperature of the likelihood; lower temperature leads to more +concentrated posterior and vice versa.
    +
    backend : subclasses of CurvatureInterface
    +
    backend for access to curvature/Hessian approximations
    +
    last_layer_name : str, default=None
    +
    name of the model's last layer, if None it will be determined automatically
    +
    backend_kwargs : dict, default=None
    +
    arguments passed to the backend on initialization, for example to +set the number of MC samples for stochastic approximations.
    +
    +

    Ancestors

    + +

    Subclasses

    + +

    Instance variables

    +
    +
    var prior_precision_diag
    +
    +

    Obtain the diagonal prior precision p_0 constructed from either +a scalar or diagonal prior precision.

    +

    Returns

    +
    +
    prior_precision_diag : torch.Tensor
    +
     
    +
    +
    +
    +

    Inherited members

    + +
    class FullLLLaplace (model, likelihood, sigma_noise=1.0, prior_precision=1.0, prior_mean=0.0, temperature=1.0, backend=laplace.curvature.backpack.BackPackGGN, last_layer_name=None, backend_kwargs=None) @@ -42,33 +139,39 @@

    Classes

    and hence posterior precision. Based on the chosen backend parameter, the full approximation can be, for example, a generalized Gauss-Newton matrix. Mathematically, we have P \in \mathbb{R}^{P \times P}. -See FullLaplace, LLLaplace, and BaseLaplace for the full interface.

    +See FullLaplace, LLLaplace, and BaseLaplace for the full interface.

    Ancestors

    Inherited members

    @@ -82,36 +185,38 @@

    Inherited members

    and hence posterior precision. Mathematically, we have for the last parameter group, i.e., torch.nn.Linear, that \P\approx Q \otimes H. -See KronLaplace, LLLaplace, and BaseLaplace for the full interface and see -Kron and KronDecomposed for the structure of +See KronLaplace, LLLaplace, and BaseLaplace for the full interface and see +Kron and KronDecomposed for the structure of the Kronecker factors. Kron is used to aggregate factors by summing up and KronDecomposed is used to add the prior, a Hessian factor (e.g. temperature), and computing posterior covariances, marginal likelihood, etc. Use of damping is possible by initializing or setting damping=True.

    Ancestors

    Inherited members

    @@ -124,33 +229,39 @@

    Inherited members

    Last-layer Laplace approximation with diagonal log likelihood Hessian approximation and hence posterior precision. Mathematically, we have P \approx \textrm{diag}(P). -See DiagLaplace, LLLaplace, and BaseLaplace for the full interface.

    +See DiagLaplace, LLLaplace, and BaseLaplace for the full interface.

    Ancestors

    Inherited members

    @@ -172,6 +283,9 @@

    Index

  • Classes

    • +

      LLLaplace

      +
    • +
    • FullLLLaplace

• 
diff --git a/docs/regression_example.png b/docs/regression_example.png
index c6a94587e6a2d121240461926c54a411645a66f0..94f94c34dfa2d16087543555e04f064e8c31c2b8 100644
[GIT binary patch literal 28052 for docs/regression_example.png omitted]
zG9aD4GC7gTAyw7~2-Tb9X|bvu*!xxH44m;ocKvt5jr9=q9t>$Mqd zV@&&nr}^{Yn>4oBPIDeEh{wl2#4`f(g$o2m3Fki@?Z$nAkg7M$r#PEK--@saaV?7P zu1}m-zC6CM1sWK;*f6VU?0nQgpERs|KEf#)+v)WuhmR3sY4A6t8esOmykuBl12 z={lkuqC9rmI|^V4-0Nbv2BAW>h=eKMAvdKu7!7$lYG@+v zl$&mkvh@vBS!7^kZGN=XrY%CjyfRho{Z27nQ~*;qh%fq{@C9KD9+8yCwJpI$lGa`j zYlE|15gg=*8T0#cs~Fls1*%#pwIo8T_p=z4&AYlmk1lk zfqOeIdiA5hB;RDh#QkXrET-aM8FZw5Fgxq7hRhkwQu5k=a_ACy(gp;K?yFu-2C0T- zxE?bu5Izg#c_n9dG5kSjz1B-=3Nv4S=^R&|qZX2=Ho0=pdH*V@ce%p=QzDFZCdvb8 zB;K>ffe#){i4^Ko5ox30|2s2IC-jHg<+@_w<;$XPg?R*aeAi*FLw&=yP_=pOjU)?l z0!gRVe^rOp^d^D}YK_$?#ysm`gP$JQZ)+NMQi`S=iMGQxMZ&5$lVZNbzC0MBFE*lpv<V?Y=kjl zyL@Ib2OX;B=aQ0GQJGd&Zdg_yiou-8EVEYfI}krwnHhY>RJ9s}+pSo{LeJbDLL==J zH7v*_(*Q(|)s@==_G^aNJypb1y?8?f!EeykbL3pMo)r2w#_J%NgpOK)D$ukXhHB2V z8Vdxaa$lFbl3igaJ^Gzef2B&^ou8s3BD=6V&kF{4lO%C=DYC^waNa&;&=h7ASHlb$ zwH@&u6dss;9P+UGQ!b)S!D}Ow8LAX8jxd|Y3j)bn&OR;}&9nduReg1s$R~nCx*#g@ z#Uw%wy_!+^`{i*c!XxMaKW~3D8P-FTCWfly_0G;wl@ZS_XvnS1Qe&@!tw&KQ#ej72 z=R>gu%gEdDEi&_Q0wruxE&(f#XMx8$HB7ZioQPpmuo#KeX>)xK8vRHo)eS;0y9Vo0 zTa>=BPdO@Lo;{6huBlDv6*GKp8A^c%VZTFLVqq}kZ{vgw|HWIpGLI$=O9E6THSM*i zwewyvPG;U2Rpe4#sWL^D$p$Y!|PG+=dWM3O`4yl3O-N3O$i*UvCoivrP>??Q30p%tV={#VA^rQ zc->zW$9z&_9}7DEbr z$fSAo!p8ViH0E-Ccr&G1$Di(C$UAsKkubT=p-|kxhYVs8Daij|MHs}U%=}luc@-@D z$j_n_SovE>6bM(FWBr8$*`b&}C=Y~-YX%&Wg&fTCdgg|u+WeBv0}^Foc1$$JM_C}3 zHLf@WNHW6<7P~927WL4@_k9Mw;Z~AkYC0Er6*<0sQA~EzW-K~>!Uwme?N(Za4 zjx1708U6p7unW}SMwC$U!8k|b?iRAAC}s+gn<0htwmdD^}V$c6oXOr#_ z9n{t`{(dbi?yN^i$+2XN5YugJKa`NiA#jdv02g^tzBG7sxa#I7+4H#9qPHx-FCFun zVCfNEB20Q``8+)RX08i;q9?X!m#vSG(Lr_fy#m39t*9WbqE97^;_2E5%_8n4H)>W( zphv)x-kZKnH9=XzsoM(^XJ`EQIOR#KHdu-eL3(X>wN;&sE*v}r%D+Co3y02woK3p= zK%Cr~csDvGuI~}gK6PcNnqn@#5VD>vNOy^g{&{i#tAq3udYlNhW&RmtIuacx-H&d1 zHOwI*wAUdv-NNn4^)nVzr_~5;ykEmSU}MsdZaHf24y3 zhI;i`;M3O2ooHEE66E?!XX`IcamBft|G_EbHUg~|p8`oE(1!8FWz!eJ1vty-0+iR2 z8H}a2@blvTv2KxL>KI4^1HXaC71W_7`Ba6+XsWByBwh4CltI=nGx2vilt3gIvx z2-M-kW5>$Zi+-5Db1`b@d%*Wd2x&Innakr8PMtV}&XjMK21W<`y#Axm$8TtT{t_5Y+a-uMXB%jY zQiZaD0{QF;$@`T??c4#!yxav&KMs#Jt-GxPSvOQWiT2q>|B>3ZzLP9Heu?8Rjrz$D zU4B;%J5<4Dp;1NIb$yq1X{9b$phQIGVHA)UMQiOK3AX96GQj$;xD4LV8D)A6><(3$ ze1dcYSLeYY5BKn@E{s$sIrSdDKrOS9yBHz%_E)zkaW~L#n*y}HQ%_8!v&8|Yc3mn5 z0($g558+1V=hZfcVQ$}Gh4TR80P%jmqLj~IUJB({B}LgAiB9MudP#jN_%6;OHcrdd zGfL^ZnsesqrkXXXVF{ z!xwk+y~1`5Y|SG|(@9s4oYxkv$2(e%i~klKZW$sp966Zot|ik zeEMte{12-v;@LsdQ`hjx?2RHsCq-ssx{~GcY}rqBEMfXk2%G~fCJde9j{HiFs94ub z8Jm9@gfpa3s<_10XKtx~M>=gak&gDx3=#<6!tV_$#K@rvhhIUfXL7F6JNpU!Tgl{H z(ll+{K^sP$L*ofnj)dEK?F|fi^EKMgF zSkS^kQpuV8>}=hn0~l!N;?k8DAr|9t9IKAE`*jTMiLG(J2>I5P_Fat*57ZH4O}k+i z=^qL3OcI9r?q->%8Okq)t4xhQKD^ZlU}^rGr>T;!?>1C>8;gy=q+{Zy{X70Si&=X<8-xWOZH?X*i{9H$CD>pVq#?W5>gjh zg}YHgnJ+tCnrTn1VdQL;O0ysX_>eA$*2dk9$DRDCQW)5z%xP!J&l@pMCXZ7Z1HCvn z+%7}^BQ1TRY-nr=(Gg}(>rtzSz{tygH78y z-*aA=23w>HTjNuCZj?N>Id#dvc@SP-B;D|Y5q9rCw!wNZzW1bwRPSFk-q2kL;dF=T z{jmM5xM7B}v%?huK~Nh$raZB1E-4AqF6nqxMF9Wp;Ft>p=WHMx?mkXl zP4mI6l`{S5vd2sj@89e3Ne0 zEDWi$Ql>!G;p?aZWgM)?XHM+rSr|0GD64_ot7qorHKLf}8_+dWqMu%hu-sXJsQ-CV z2hi~pF9rH9QLI@x4DCaQ*ZO9o4wCMF(=D*zhyQ^Wnw5e)Hn{S07V!2=5{!gY5z6y2 zlmr{V=$~m&X1$=^>9h;wI5(wVIx_10Te#G0iBRU6Z_k7-$IImoaY7u&krsKT?Juf} z$#J|5!;PRC$|fF>vedY?_%j zcN9T6H>;JE#48Qz!cy?5u>LZOQ~3`zVdCakyU~8O=!4k&*=mIbCRn<(IfFw5JBD?u z69<6?N?tv9(VMlqr!z5x{@>J@hxnI#6FsBzYNt%@2r(^oNAOg~@G}1_SNq0+kmvnbXqm+P@fD(dwp6z??z2km3 zU(Wb{*kd@@vRJ*IHJ>?uldJrLPBvNWs#c4fB5A7D=I&R0R=mfuk83jh%KOQfSz#P` z$tOBC-3H?oduK)3{0T@BiNzkwp-5>|rmmhEa$F=cVH0D#EZc?Y{+(L&VleR_F5QT0 z8P8(bNkSo|%)Duj+u#JLM#XhMn)rPevGUjsf7}$#ahlOwRTT|ajkx_a)iWTp)}M%M z*rpnW8F#ihh{+iNQ(RWH6&zXC3=qcPhW%5%}gJoGI)|2h#5M 
zdv0b=eFJxTe4qvSs1!zX*Bg%YuD^dmUop4uT&6pZ1}2{$Km=P~t*-A}jCe?_`~98& zoos+|I3|5caFTv+?#}VP z3r11+NNZyx-t`J!V235^wb*ki>PVoHJ55lu=zuMYe8@{&Bo#&?^CsxcFKv( zGjlSvstsk=wfUb14V|TCxNy(}R&h{z2Sp8POnb8nKn1wdW?MYSlNVi(JH<_h7!86- z^f+sym!K~9<=^Il5x=X>QrGiy*~^6DMXz>@OrB8Q<*jr$gE$a6T-8YPMi=Gja8udMk5igo+w$H;$_M;YKNaJJO1C63fM8e_(!=Ubugg|EYqE@4GLq+&U* zVc(|odgZQ@Vq>Y^m!n8eyp)DtujJqs(U?RKvYa%f^i{c2i3^e&A%hf{F7RUcmwIPL zFP9h$N*i8<>4>d9?1q>u!SNqTc1g%i3)7M+S!auTRh#z3wwQS4g;TbeMVwcA(ovJ| zHWt5z zb$XzlwnuytoQtd5Lbf{jawt{2(|2=SgAh}8(}p$BE0N&kg8dWO-@Df+K?vokB>wI- zCTOM1&&!qN#k{6{34c@3B5*-=W*<*;=7TRasMU=UY%^GNm+(B~&8^ZShN94=z1L+F zSuI;;K-gkgY;uny=M2nn+maQ)ey6@68I(Q!W|ds>7`>+Zz^^Og=$J#G^1Qqyc{@fc z435&jZDy)1_`M946d8{ZqvDc1u6>WS2GSRQOIWXin^Gd(H|#~y{M?J%`P|0fxvkDJ z!(bC#s?u^vEQ|7ei26?wDEP*~iZtqZ%QHqDkLNn}8t>n+beUWbzDV~N>S`WWcD=5~x58Sey^*`;NZeX}vq{Ik zMG~QYveu`Z{%Z5K(xYCmW-Hl_0(}C(UL#{Bu(ws0K&}>2i|MW3==0^rGY`2~r5}ig zeOhbCmEml39>L!WxEwOutEhtCQ0vWtVAg}IErESQS~~YZSCKeJptkph%+=d1PU8Ka zGx9@}VHn;$ruC8d=j;D+IuMT@o@GR%{eGyYTf$f|r0` zMoo%YJ4MKh8>9UQPWqQFp-_}B93dGo=0oJDskKcu}jE4fnUW!K7YuOtI7EmtHyG>fC7k(WigWQlK&t=PpTdP0er>J9nO$I;*IsrgCK4OO@aXXgc6^=I?nrr4g^{1?e+SV;5aBHwt$IB zT+CmF+JyTn+3KcfnqO9&v387`BBX^?%Q_LeNfG!vkL=gD7koCv5rgeF(6TsjKlA-C z^2*hrR$8m#K~t33iMkLO8=?WzT>kbSS?eZJ%RY>5AYkrMv^#)O)+ zpITCUd?1^<9-c&QK(RqsK<2~Ekvq^|mNqt^JQcJm?>39P6J3ImHQfNEWG?w3ys@HZ z0l~PRf8mUEON<>pcq~24NFP6HtZO`&c;0$LVF>NydV&;dfXVR5`+l$B%bS#HSu=sP ztu0|LWj$$bCat4imK0Yvay!~*thxw%$P?s>Q(rGCovV)Zh9R+IQd`qt2if(ve@MU7 zLxA-!BC+QrKkQ_cpytUEf{VzVj#_5Py0?$sIOuCF=h^zykKjyCB+4~fKJkj!d_pDn zbug(y;xFYy!Ibx3X}9->qgsZ_ALct=T=*fspjlIIP9Ldmtb}>_3PPiSPWR?>2oXmp zM)!nrq83rZccEKSF>a4|*D}&lVaykzbkS6^l3*%C$!9m5SQr>XSC7A;kGJU@p~RXR zNdlAamDT}r9euki-?08`m}GH3=xK7?0_dztAp`L!9G zu~bULn>DNI9J{E8srH+DIw_bS$)(Wk`9sM1MMAem$@QZQ(!TstRiT#@J%NDFaWAHS zTPLF2QisE@R|(Yl?Vl}QgyJGucTW5e_6-cqMv}8hGUW|fctKdJD45P{)^$tC>c8EJ zLm6vr!E#!Y(!ng3;q^-KV8?72+j;ZQ)b~`dlZ?OQ3bhmyH~zcUU!AZBSqAjKB%;yXcIJOmab}W_Qi0E0b3t;xcb;owwmYW}c|Lm!=vxW2C9>+!;L0s}Z zBT+<^0*N*QmQq$ru(8We6Evd2TkNA+(LquN8QW zZ|Z-g@UpJYQ^(ufU1ha&2GNMHN#i{S;>)bKaqsIA*4Hp~Z!6Ob%p!YW?XsU%?lyj5F&_2``t0&Q}0 zHF}80s3FGpJG5{(Tc$gdkwhA6w z{5wp9IfJj=^zFBcQ)2Bc=yXlE*??yS=%RRu1b8bl6f7>Lq+EVHbe<~t_o)qO#3 z$Ki$0>O5JLZtCt{hHadM%f&k%`JN&c#1l^O1k(g~4{K>vSi)JDa9=S@-#YaVQ3&HW zft)-$uwOjg(Q;Ul$1->C^NUP0uW{Tg);O1>9FRmj^38v!>O>R_9xBD$~FR z>%gadYGz+Uvv@IdRi@EUeWzCuR#b(gu;#msLwZNP^8#ygv4)N5?HBKz>nNiK z&AIN$v${vev`o{I-??i}9c1!GLMZZ6&=dGi1bHbIF&#*F2l;JTtkSDz@l+D5QB7ZT zVgVAXV+AOQ1y zORM3rIG#wTpl;M9s)*>C;N4B%UrXI_qzmg?gWEV(%TaX|}V?3;a=H z+gA~VkKPCjL+(F?5rCIWr3=JwNTq4xFqGcqPa;HVughvwNxYiAL6Q&d<#_&lpY`1_ z2N_Cw)F$d_s9@|XA4ctL+iPT(}ygHtA41~Ivxen>h$#r$qApPizt zueGr5d3&orrtfxj&=mUmf-EYtRM^R?<6MG&vA)obLj6JVHq}Y-R6mob>64uU)j{IG z`;^(l^S_K~Fq`tQ-eY|O2DQpWAyMPU$G*uEm&HeoYZP<}^S4NH!)%~dksPd)T1S8ZMGg*CD zsSg|h{7p!{l}TQAgNSv5`YaCVINnKcxu+XfJ*c{JVa1-)9I+0S@+aH-N7wsEtNSo>0|Eh<=xY^SS+RV&kZwk?K4xyYR?XXA2l^ zmOC@?ePZk73N7zXB30odUFMIcN3z!5q$+NckXJF-gZE3txupZ=0uVgjg9!L5Bcj8@ zLnu$EULYy4ASWhnLEAu=WXidnE;}B!lOB)r8j&yLb@SC@GkT`s;^@!6H%+zMdvmc~ z2cn0*{9kjkX9?enH{~F8O_#+)9GD)2#U8S8AEZ5o7^PkPR#f0^s%7kdPwiY@rGG(4 zDFw+uXHMH|qV9?U!VKEtV&AWL36(9PxHB|GF@Y*E54d*@;tK{MDlMHX{F@I!2}eGh zqIuyK4At_a)$&HGN^vWKH<*ve1SF;h92+TZuhuH>FiPwWt}w{YU~-tyXPC`%coK(3 zb*zngn!BiiSQM++8+A*=8ZzD<<#_|gE3p69&r?{aNc8|9L1jfn#So=Wm}gsXlgVuH zywz|HA5$NRv9kUY+KXl+c)JaZW0)EfAIAFPW5!=0lG(<&?0S6|zQGW1DVq$C-)1^Z zd2NQU&y`xOsj!F_lil-60rO3L-56fdx0b6s%Sn-@v+pGoh-9Ss2%QRI>k~w(x_T9m zajV~5FW5BdG3MGCO{e3fX zCtLHj(VW>TZ6eEShPWxgA!R0zut|HksY)>4#BfF5!BN+T=yRs-Ck{W~az<_sWml&A 
z^tSgLcI%;}?@JoDPYNXrlocyP^BFE-IpESY-yMH>=4}7m(#$A#Q<^9vig}aVD@-Ds zj6FjqfoZZhM}{Kphu8cGUF}^&g*~>eZxrX%g*2);_U_HYJ*e{=3%J`>qQQ|~;)mv2 zUw}~KpGLH;tXbJDO3bDejTi4c?9tH0MR=1&KeM1a#CEuP8_Y&U?7f4zr7Wl?h{vEXB%k8B9r`1>@1C4NvW%+&NK2X$=rS1=957)fnqoL6rL0INu06}si=dv z@Rl3i`rO$oClw^jKJJxsnMuE9O(zP*u*eC(fjvNJK;RC8SP~ZuAt51DWQ8ieYv-st zJ(telFu^{fLOwlu*REJ@D12k}MZFZ>!rqNbVfX;Dmd0u#zW`%}5mJ1m&HW-U9p@S; znXg7GJwbQ;SrntOan{GW#wo-qSfw;u*RLJLRGn1I^#vAP4Jiv&oC>M!!xcT{y zAhh5dU6`zxA@Jkb%d4{kb#%4o1e(Gogblq_6!aoq|Un`7d{s?BDD z--m-j_`033>u+8L&-^NHE^wseR!NL=XMdRcQ0Sy2CrNZwu)3`M7&b^1kwzeCERIdp)=fcBDcy!i z+?p1z8DJ?FrXFRKc&^4Q9Mk5CnrH>9K5Kh3L$zbz4UaBeQHe%ALBn>V!5_qckPak3 zwvVl@0?}oyeIW-9Z8LEXx+;pd#bT*S_mflmCKR25hC!fcx#d^_WfJjMb6oTh+msMAor)>d`ue2YX3MK&bIIB3!{y7F02n~M z1OIvk*r#KSV^mWBf>TlRI3KjKy}Neo#z(JeiIR+9q!Ah!nD2QOarl9^h9)gV-Mb(U zK~X(+nt<5XOeJrPxO+Whs+@<7T+!OmP_|GB9%q@R>~0D97hOs0qWwwK`0v5oRI1;P zY_wa^RaI=)*VlLwaeqd3ChI+m7wLX6wC4$AHw|vdWA6t?57!L0nu?O#TB3`x7_VW1 zJhMtV@;BU*w{0-i5}9V1%d`o`ZBDx}>~)1rO?d5_GfA{^P;oI@o-Xo-kEZSA*E$Ro zQ8mp@UaNcWVXo~W+@O!x$XW)zY$v@R8p3D<7Rpwz5UkOWHL8|I7ed`#Hl6u@4NBZf zsJQ$*%K=6>7+(Tos(@6~g9VDl0=liZq$JeuvR7zfNYA6m??Iqhrc8mQ3lp|C+uh5M z#aP%5ZP7%%Zh8;!IzaJR^rbQ5uMGbV^!p?5x%}nx{mVd&Nq4#ul@Ca2HM2arc4Xs_ zT>unMVG*OkH+kYoBCKp$pUStPJVeg+1Fw&_C9hN*Xt&6L6K!RCDfL5=0t+AO($K~` zT~Y4`HycN>)&lhzjl&C2o55+Ea()d~fko6<@L^ELVD(qU_^pSd>jgu%@9i9TZGsLO z$3bwV_w(IM7GGpsTwI+JD4pes*Dg91`++PLHHzRRr$)6}v6s|HE~>}oRxy6nP%9A3 zDBm97RX{Mf`MMiZW9jzMu*GhHa|}9W1J0WJ03xx=VcPSHyBl!du-Vw$?55J!*O%}S zMN=p!m0FW2byuhebCHIoyHV!K1!l*d*TTvRMVl9R#G3-*{H6cZ$YX$(f`Y>8Uq=TA zVK8|p@-@@)ehNfC(h4p;MD@ceEc%!JZKRb5Wx$8uDaB(TJeMiR5PA1W5MVpS!b<@_ z2Vf58781(X+Jc|5tg5T~I*hMnwKp6s4s4IA<5pRoU|{yyUr73p>;0g4IN>bEoG>5g zLL&2zG)rTv)GsM7kAM?e}s0FVv_6JRsc zmI>}dzM2OZ=$gs6AI(}1MPNFuHN2AvMLNHJ;r);RxH2=vp)~bM{Fv9)c7bd1%>Q+RU|`hI`@-$fO81OH@Ee*gaMM+mn;JIx z|NY=U{t7SJsN+0Bwfs4S;Q#&sA0lf)=`4)i58VIIDh7uarCs2kSk(ViF;-j@eJA4u zj{hjYXz5lnT@UPkznVhl!elIGs!$+rq5xLr1nd_<6FmUO6D{kPYO|yyRIoNwSMkQY z{?EG;wMb_S*Va$d@qquv23@zvm$(kX7@p{Q6q5h_Kxw(fWgXK)Ute(n^S?i*G)+)i zHT&cLT|{wt8Gxmo|NAmh`pdJ4|L;453dyjT*K#H*U|mobTu2-gDvqiC+oZu3ga|Aw z=yEkG*i`^nMcCAnT{l_H+J))K2&k#jV)iUk)S}`X?dK^_Rm>tid}bbSS^v-1bPi|~ z5eX(~&-Fa;zo|mkU17OJ*|XBhSK)5qO&B)?7N7Yb0+`1Iy($0?3S2N+i}e1IloP^j z<&U_7W>0&>zzkycB>|^TBu}_P3|oEqu_C}d{{Vd762!E&z+EJoDhB+bd6>3HSj+|E z$p`(PKYv2?=^l_t_f|Y#9$N_rRd8&P+6#FMtvG139cN8b9e|t$EePr^Pe@;70OZkyY48`Mqp`~yANDD=V1&4-0dEm4aPl7^$de_An zb^*B)+$s^9>MkUy(TO<;g6@Z?hgy{YNxx}bg^KuyJw?#c|0A`BK!vM4}41fz`OUjyA} zk2#dVT?qgbwL=fkMZr#W-fTPHo2{v=+-+IX+y|pwAz&Bt1LjY(a1JaFsQL3dJNc=^ zW=)CIDSs(a0-g$Ii@1``_9G9(BTIMaA?)m%heylH&tL$N=hjd6uhGi8c(h({gh7)6 zrpo7G-FiAYuF5)cnjhW+*Prmi(mH&}Hy^O1q^BdWAY@OhfOl5ZQVd5T3_q!W>IfPR zM8>+s{~tTFY{kdKpz&*L9=a}R2r>E^J|2RxNjZ34WlW6#Q}qF~;=q$Ah2I01&gQ_C;uE_>9z-zZ9DgV-!4eq0b4sXUMbAu@kf! zAE4TmKt}@A=T<<0u6fy+lAD_=k9}Eq@|1UZe%S~r)a5Zw8kC8QmA%AZtkI)wq-#*_ z+HiXxEfU*kBN!t zm6Y=Q_6!DE9e-qF)7jZc645wT(_U0$o|t9Go0O6=h^s^~0K!f95~*^b+qCliE=0`RH%CB! zAWmRpGlD)YqRA;KbZt)SDaS1$WdPPVXWv!)nYmrQiT{c zfb4!z8qTg`IzTxxoOyLaH^%I7V|5sMWlPljz6L#VP+ujEdH3B=(uQG>e*`yd1#|Nv z;3V(e?~~fz;a08iw9q@iO2EH65_~o+F6eaty{YwqUNq3ADF4&#q9BA7EkaT}DFNf{=c;0@q zW|?>e97kPU-IU{9R)XG;rqF{|()?fbe@JGRyMk^(uHcTP_wwa5^`Gy^lJ{TWt)F6G z!@++7%s8RMq>d6Lls|_yEnv=dku8oa(PlufMh+lvRjN4V+3`K)0ETX;eKr9B0fVnF znu4sh*ZiFhyR1TD18Cg>y>WufG-l|@B2I<<7qsNTlYoR5KiBa}qNtFNRcLRvp87Za z2JTtIWf`WRLbD`G+=n-on9By7)zMV>vJ31iV#-v&ab2hd_@WE6Lk&1LZvb}JdHMjd z2qVGFEi_`$zW}x_6s-8a;SjXN7n`9X^nn5Yf7ik5f1nB2|8Ei5f3Y)KH8^nQtFl3d PG#C{HP5C<6S0VokfQYY! 
literal 27924 zcmbTeRahKN7cC4yf(5tW?(R--*WfO}-3GVd5Zv9}9fAe7;KAM9-T&tO&huZLt8>xM zFx@>hHC4OTT6^tk!W88tzQEzYfq{X2k&+Zu1_J}{0S;^!XyD95#~lLj!tE@k;r!js z)Y;Y0(F9D+(AnPF&e__+hzMlj=wxAM%gVscz(hx6?(A&u#KXvF^Z(z#VCQJYDCng^ z12h6_FRAGS28Lw#ae$Wy7F&RUS^G3>4P+a7g+f)96^4Z+j(j2?F;(bGD`3d9qk%_ES)u^A-7wW(` z`EAEmx6L3dM4nFjO<~aaTI9?j-;R5A`xRVxo}!}SpX&)h--UZup3Il?_czA(=dSlr zJD+RZy1KfDO(fr;`OdpWhBoN10?r!uYg2*e6IN&hYUFOI5PLPA{h4))%Aqt zZ(pOjE_&cI97m|p(b4g)+V`_OqaqkPQ;w^<-eQE8qQ*?O2XO6nK8|tT^B>(mp=2|- zVw3fKQb$Hc7&By+3iz zg&EQBb~~&7IHKO^IOGI6UFw%0^zL(+*B$&g?oO9A+ua!2lDRjJ z0|JDbqVp=Tn#uJ{r)QH1Y(nsz&n(-ULrhF8#OoCp@(B!e9slK1=#3?->fP-3mwlnL zS_Y+$C1x6-Vti184w=d|snmX=1e?K)gEnh(<0tsY*kCtBa0j(MYWeP3_? z&Cg$dd}Gf1=Kvu`C#T`DF_WPf;+LCZL6eaL>c6S1#miL^B!^PYy8FAy`g=eRfuIB% z=fBsvy1LpaV+vb{;|G(nj7?7;y*Zq52Z9SGNEkb~8~7yB;}S6Kh}H9>IYI#obtdpc zBnr!0P8W%c#l^95w3a7gLZayR>yft?8lEEcsDqsb#9maSg_Cmzwo%P za`vsPtSqdoNa^V-w^NENWU~c2ctaRs$rA5W*V*TFoQC$X7<7h!_GjWn;)sG29r1Kr zW>vk8wbzY?+a&fKi6FlbC7?*NC?R3BjIS zNBo-?5hVV;|L#_two73(L+mz);XP{!V4dK*&e>mg-^bT;-^Ivs+Y0N&ERJzT0^)P9 zKentxG+#;2^OSvD@cl(3oahfQE8~Q37oRPytcFKNkE;x&$@s^CHhSCLuYq-L(EX{d zzCM-5<%oEq4fs{=Plv^i_g(K}v+DY*tGx*)0qDX~$=Bz;@9BGcd%abd;6O)N2;6s2 z^gIs9JMTNrZ#NFe;?8y@8%4UFW~=iomWZ=?&)csr-1t6Ls4|Qvc7Gim?P5U82ZWMo z4sq%J-*GTafH=_e?qm~K^JKidxXsIBDa^(^t9al41KRY2XdD|$%dMLkh5Zbt@y*lb zbqY?-6Ff4|p0@A@VX|NJN1Q|1W(K3>*aq_J^u#ARl#Dxv6+9G+??VzJ(g@Gj%|ylef_j)=fh zUw&TRR*SX7lE2qY$l>uSaNL{<^Dn@@Dty`GI{_zz0 zNxF_t3O}O2wWf`^Pa^%;ewqD+V9;m6rle#qE7gpknZCfQM%K`9Q^>|g{1*ZAm>bl#Kgjs71HF zC~9LB*z-#S>rzk&n1`B&j>FYHnqnLVbn^130^IeXE_^xE0=BQyikdx>^ zO(j_$4gB3(QsbC!Qp(sAKP@7vO2F#*uE3wj?X-6RPzj62jk(X`sQ%T0akNA47ta6l zY5N5gt)9ytNFs5fN6UE?D~rD-EP71@o#&&fh<5K`zyZco{$Neuj%g{`88XZWa!+h( z|K3fmPWuZ1KauaS1I~1#(po$rhS+`E%D>`;WH~h(JN(`Fgq2M1&(Q8+f#E`|)%bO~ z+-#iYcwWp}Qa19(AHsnPPr}!Mu(zmG)Fr;e7IbX6p$G(!iwpbM*cks)Yg|X4bG-8^ z(P**J;ccJ;O6_9~E`=DdYWagrG*whpb1iUynSKQo&S=Wc6x7wlm6g!|WcQ>S*9P>d z-#PCx3#$i&@^^H!cl+|DZQZ*r441+N1z!wY&flC&*jpF>pi=ZimvE6WNO9Wu;I}*# zdR|EhOti#auuJgDiVh7e?UW0C+iKRy<_#4nUHC6k*a5VU$Jx@C-UH-rfTReXb;ABM z(B+}vr;GGkprH{J%M{3BH?oUXuqplqq0$%c#1Zr|A^7$(sWQt& zNVc0*cX%r(%Fo4#u-PKde8F3KyV`MXRYZrHJ$iZ;g3B!!jI}KqC-qfFo{g{1rt`h$ zkG4$-+=fmo!@GG_M-MuCiug}Kb);qKp-9ICm}mkQgDnh?Hy^-1Bv!$^(RWoXQWo!A zLF5@Z5(l)lx$8PIK!kBa6ZpP3jEs)fE?U1_gb2#s*+21tY;&lbwQo~MiSxzD= zJnn3lIWlr{oTLeBlSNr*m6Wa2AH&&p+^P|{ZGbyEI^Oa6Y`mUcx;@NfzljxXJ-w9!gKDSLhHMJLQh$*bK&Dm2KCqXMCAt29w_4 zFTb#vjk@-M8Tu;d!v_Tg^*PhE}%ulPs6BSKt2L=@|7j9QoH#RiN|TV z{wvM`-W3_Vqknv z2l;$gL!>)9JIHWB0JTv993VcgXFw?Q9pe`n+3dLtmo3v(=R4VM{XD-tIDy1|SCQb$ zg$N+s2Br&TqDvtr{O?^A474y~Q^2L*VivIgK~R7K7Zlf22QXmo@h#}-)@rT-+~98t zGgyUgTN1PJK#jw;xOgN^f0b_A!%n;c6*cvu0_1#!rihpr6rk4*xtc(yS|5~G)po|S zH=a2<;rr%99VqclNJ>lDyQ~%%$g_@nJ)hfh{aVM}k*IAzr*dXAjMfrV-b4+m1o?crn_^iQ%5a4n==dhpY z%Hp*5S3cWy<8^fvyGY|p*+@6Wk2rsT$EN{OZfI;IKdp}r0>mJ*?F!eo7H8L!h3>*+ zy%B)pJ)3Nn2Y@4m&z&V-BKnMLzkWg>zp^sYX`DM`vp1yPc9jPX4sM~w5CVu=01UFc zuB5&Od7xXDNo%vYOWlU&;l+EhXsW2If7j9Z`x6Sj<+N!z>Tt#T{5*>YhjVglr%(n@ z^8Ec+`iU+)?`8qvMpU!W660gQz?PfsiW?ddP)NmVT#nWLL-zXY*IxkIGa2>-Mme3e zoeO-N{p1X@3E*v*cs?M-p+b7Q0b$t+D9JcSxzeAjMboHL@%+E^Cfd$BM}K`~b-P6fgpW;VJzGhD!HNa)q0Jm)cXxN| zr3P3(rwRUK;Bf8Qudk~sDK9^cy}V`j5K;El-+bK9ro=;pP*hU`_00=h{DHs^+HK2Q zE3wrOz1?w6=E4D>2UV)Nh?8vg9wR&=g7$ieP}GpSmv<2F?6BrEgc{jV{~MXB4kZAx2>zMCDaIRVuF zogJ(yZ4d`;pFPm{GVQ4zF!?#?qWd$Us1o`akg)wg5_7}`SpE)K8Tzpeb%8y|K*hN% zxd5@W1}9K}A~4(g?yD~lYuLOX>$(sUZpbu>xLo5}jW~%oo`(0TC12N9j1aj~n3k9V z=+ZjiBMAaib8`bI^4u2TVov^k!Xp+PfeP zUxP?JaP==~G~cw68vP=qFh8fJygRW@rx$l)W8?F!KPy52lHf~>;On7+X#P~dr!N`u zl=8*(#UV7YH#Ev)i0Co2J+?(vRcC>uiQBx`=yVdkGK%Xts8~ 
zdFwGeK7MANND3?!LKJR5Bdqr0+O>BB3AV%EeYtdwFJelH=s_l_CbQd@7gYN6`C0yN2L#50zg2w zT#e8!wm29UkFR!k-ZQ%tFC+s=b|N6^f)NQ94}AiV1a7F(n@)MECXOwY*VsL%1Rgd5 zejo_}3Yz(p8lAbfukh|9NrKGwVjOE0(qoeS*mW_LxeP41<9hx#V9$IIsXYI!3Xpjh z3%*jbn*KFd{q5q`vDTY4KRNkLUVdC?uvj+rXC4*b-GZ@f8k?59{RhVx!0(M z5(WWA)09Ey%Im$c<@0?YN!A5TUVddYwFdyU-t+$K zHPC*!$tI;}c%wVuK8oAcCknC<6;233ERvf^=EnX&wC0ir$P zYI59_#-!Kk%M**>i2_pCXn4lwKipf4fS)_sq0SD+w2V zKHDgbr>J-6g3a5c(CC)q_z&_%a{NwI)?v@uujSV~j_E)62=GmK zq9#(90|2Wkb6(%KV|(Fau67f?BR&d{las5cs5rm8Oo||#sz<^A+MjN;r%8dk@sd*E zo2sf>wbrn4G@mF)9+7kHrm^aGJA=f3DE#$D@e`@BCZ_9}J0&1BHaqUuj5q%Hf9}{T zE3Yy-TWJOB+jcksJsYl97Ze2Qwt)aA63|hA-Nfp+GZeKiH`>zxCT4iZ-4Wc(ckn-n0QbWKkWV`^ZfsO0OlBz0b(XyX_ zA(jKk1t7CYV$_!h+`bQA;D2BFc)f32zxXg)?bA;PXyn(d=wpf`6MRQ8m!pE=m)&8? zmYwNQuIq~WwiBGTfHm4Fz$I`51E~_D&zclJ^TwY6pR1~_4t0MR>)bMMDwWvyLigrZ~E9*H!<;_{o)f!Z=Z z`&=7f>FVAStA9kC36Pxva!`dcemaM+me}6H)V|_8}{=Bn~ zbrIbqI66nK?Y5a+jeo~~_H>5-a$SfOb^hr$;1&MYYjYZh-V-Er_EXBy5mpyeIVjWSLdgkaU=Eq{ot);Hm}Pm& z4GOtu-2ow!c)vhL-j3H-Lt~@7Rm{%pEE?h@u?6To^psAp@jjwoUJzTLhG#` z);psjMv%QMoMD~0U3)ss>OL=EWdUENya3E`K`yRoG46R0s`s%a5Nz3lw%-Bzj0-*b z-`W4xll11KN{>T;8xN39wt8S%QEY6i zkc|yJ5f3$9)h`WSV(gi)fiHMz(h7seT1jGTsJWe0QA`Yqzx93K^%Hjh795tMu2aio=BQEo{*dVCPS`wvlU+iW6BPEU_uG?{U~qJCU@ zx&$yW0izNC*Es%P1yflvRJ!@PGAm4)x>KT5mP|yH?03*E*L~#(h3&`$l0?X0M6|TW zHPt{CBAV~nM%CQV;QZV|7DtML09{&EmIstXww3ht6^xBZ<>ch>-<#eAkzq3I>Hj7) zLqG{DedS(Cv@WwX(e{ps`?a%!kd`FIR5pVbjs`Pmf!J?>I0!Rojm6=(bK0Fp-FzT(V_UIOWP9 zP%O3b{xyl0SVbx?;}ll0OFMC8B}f|dABm99b-kY2T?4+t_2nSpcV%VAg?W5S3LPyi zX(D7Ie*O;1;50t1@?1xDNSZ&PKldPgsnH`L^o8mq=FJv5JUI!yZ%}~oHoHBVtvK2>;4q_3m<4S5D5aF@9`HIl1(i@J{x9i?@23sgiVH?Tl;$lP+)1>vN zaD+@nlYj)0BN3YMy9VWhm;L8UH(eGkcx>G8w%)V)n45KtwN>c_Tbz3wmW8u5QP$k| zGuv07L4$y{J4d zt|I96K5jElbD7h0q^P@6*p3ZkZO zFOSb*>kpUPtp|A#T+b)#p^bUqdJyVMUk&jKfc3nkKuef{|%K?6E+YJK&|` zu{DTkwm16sYvYz!Egu`Xn$wWNdHnqm~N0kRCwMa(1295@r$W{SvkUe9_NR%dS9hrN3rdb|G)Ng<%r>v?v>+tfI zbz%povTfFlGUNm38%BEK3-FwY(1C^%ERqTQDafEtEE$57MsGgE!aSnR*%KlON>fjw zcw%`L*=P-GmTq}b@>wl=w8~LhlY%nYge7m%^`z>EEdEM(A;if0H&(g~B*cN?=BH20 z+cqnR-5!_Z21hm5XxMJzU5kX=IF*%^A42#)G3@RMR584^ao0Bh!E6e|pcH|j?;r^} z@4(O$gpHv-FVnU`nt?0qyoHC-Jp%7A0OT^w?9+Hr%sKJh%ouXSdfK(=jh=UBa!8~1 z4g)cG6bU8;&eTl_Si(}24kmuQ5J#NCbI8OVs^!F(kjaFC<6X8!q)?dkGdomUS_tC%x|Q&qG@!VV32GQIRPRPn>FbMP3~_a zR+~*OkSjx~e9$LvT##x|_(Y%g)Zht`ZHiY@er&j?{V!B2G&HbT#BnwQUry8SR0A1_ zo)15)ATV=i?2r8D7_c?JEy6<2-(VIX-+f-{?Mnv}EuX&6n%u0z~AiUH!!RD$HE1p*lo1 zCp)U5m$}7biV_+i?!LT)ehHw*T5S0{KB;L&PM<236DTGK6wuho{th-%j?~8?eghx zC(AIg{i13QbAbH{mt)PKfUj$B`oxt1czG%1s?IP`Z&NQ5bA3>*-vh7lXYfj-3q7uaDb^81jc{ZNJVed zS+Rv|HHh8z!f%3uYO1=8Tu66{+N4<_0 zU{!{BW(k2uiw+@LLYU&HrNp-Qd4oDchbC;%_ss>SR}y2%?8P6>yb{GTCyNe-U1(O? z!E4{hyc}fA=r8|@yb5d6xG|0jE?fpZ?p)E}_0~8zn!X%lR>C(k++6g9yqb`0FHv|QnOh~r ztwrhtO?)}@JzN>G27Wt)^p#W5ZCi?Iq%dP_R$EX8tRR!2}s#8HOUlP z2^6mWq@>#d-5a^psif*~&SnkOA3SB)U{*Te)@U(OU)J3Hhs3WWr}VFD<@CwI0-rG;wdMn+rzCh3>fl zm^SUzghw#CS{mhqKzz|ZH{pE3Gl6cTqU z2l9?{nH=xk^2a_|E=AZe-tOt34cy-gNUEnKJPaBWgzME?U2E2m8_UIJ*MsRH23Cuo zAjTJP*~p||bwG*12I##Fh`LVGi9+Q`-TS;&m+n`!r3?-epO5>x)N~f4_~f5v&zO{#t)S87Z&}rP^K-Z=)}o-mzaVD#`mV$4q}C`vd;LM`A%l}6;6&9D?JRBGWJL3fFEB}y&s z+GVKz0yNk{%%k(9M(+}}mkV2Jo$pPd!EN^|);``s(9+i&k`rho>$vJ2alKm(3+9M} z=8=8F7kD+{QVTr;tBq_NpRv)GU~LY2$hEqptWo@0I2D6D(F5_ZVO`7jdQ9qvrj9jc z6T$>L{e`+U?Z-Ph6cCnea$S&2bHZn1S&p_4p=oAw(t9xJz=C2-wrdYetn1+}sFcn? zU1MP0oNSoA8&2Fz*#Ic^$Q1`85srJ`uS!rk8%3g!>xw>$yZ4&fKNI`1?I2>6x9Q6C z`-GxkRkOQ7y|Rwb?x+UTL=Z~q~4uQS|y`KD@_f|xcv$la@6^|Xbi=TyPoN{j!`rRDm$c!jH8>xd}5=5IqVG%=? 
zPto|me2-@ferzBTM8M}s0PI57?ijMcu};ISREYkGTnd;IlxI^7Si?}1NO6r$-oMkj zC~2e=&+9i78cLxJphCQRpjH&3-y~JRi;_GrX;5h2t8{wUur%U>cRYL@7E~Ky~*`(8uqN} z6uKYDl>!aEI*!SoysE}g&K!_-@5eRgbS6 zTW+BgqD+WzDVTg#Iz!Oq=vd5W^63h#loXGYpQwLs^(^!E_*3+%vl8kHml-4VgJ zOQbg-DM%kA^vxI~@cadAys4)hpyb9};vBJ&FVP~ugMjet^U>r2yX4R!f=?(%Jn*Hfa$yX_(UQWSE9fw`VDKg?Sh z0zENNm4fyCYfsI_KrJFcI}=*JwAb*W?$(m^xQ~)`E8|w}a*1^mQx%(%A%!74h7!^; z4d^>^b?|SkT1M&e;qBf@4)H=fD z;pC@-ZV-9>UadFb_lN(X8s%t#C&6JG36xN^n`ZSF&%v-EbZ%!zv{rtl!&<;28KNM- zTq9xiNkBT`&A14!$xOHX9R_6gjzEpWeo73B888qWtW3thher^AXzDoOzE#fuqLM3}34T(Y0O4t5qBGz%VI=)NQgQD^l$%9OfdVJqmeXgzH$? zi{L9R9g771i%?M%|D0s7(nOjmr;93Y5N94|OihrqHtaE}=1!?iNm)`21eBtNCBnYo z9vPVJ+#a*?BDhB0Qyi^GX!aPad^QZjF{N`VauciZ07B>EnB7ON1{48@qkUhtqsm_6 zY{i}6F5%h9)kG*->V=}G@xq)ouZid8($EX2U!G|Rj(idZ&f2`KpY?FoQ_Uq*H-aWy zM2mpAHz$i}kU1qHz|;qeRjZ}^*CO}#=@wffCmnOXV`-$y zuYZy*QN8P=*RVO@2tC%nj6z39Q-o-Z!ZK>4WGbQecoMK9EVa<$S;vN-(vTBvFdy$? zT6{AFCP^zeK3{QelQ>)dQg6D@Ue-X7mMi*8Wsty0Kwgz~yfqe7qT zq%5&CNPX$L@1a|;-KeE)_wj~)V{2yeBBl8hB#SmJ)18S}C5r+@l`sAsQB28xnXTzx z0}7`sh*LsyIY~ZhE_e(k+nNY&%~B^kXyoh2zL!M0Owu%*Uzx@)HP?&vru`P> zS8S+==!)8x#g?Bbvz;aeVR9U#^0BNVpk>9TW~v_YR61?*%3!(kKS@y5tF-~T+MPK} z?UlV4;ZQ=O!`ZjAp@wD&b@8;;qu-=X5(fgpHi$$*_3sRnJT=54L0S6mPxFwUz-@mY zQhXE*($dp4fr6a^-zDV7{)6+5djeoo+pOc3(Xf@2RN6WQwMxDoab8TPAy-$<*Op#X zAM+)cmH4?&9>$O8&~@=g43l%<^kVM6@efU}!m7vT!HU3ghGP`dqLm3^(boL%0&kEQ zY*P7@WH*0bbP{Kn{0y*V#WQ!EyTZZ=+Hko+M z-$8@6Dr%vxTO2SPWYAFl!CcxO*|`0Ku{x6b={XlMacFTDhHB@sckSMQoW16!8x{e^ z2~Rh=;~RNhN*aLoX~ptcP({wa!FuZF7cp7H8JQewmIS#|SG`zmFewOSb3l7>?~6d+P_gSIJ8c z1|i+#jKP{Wa)P~A_*;dw$STz+g`+CGftCgrkNEVzJOaJ*jc2ZctiIR#8-|B|6U!$? z-`(TvbMf`e3$T*l7EQ=84Ou}6t8s^yZ`Irp^{CsCX6moCqH!0%!Y$Lf$Wc1E|Hk!w zrT&sBB68EZX2bz`|9FYddZatP+xu&|_3rbM)k}9v+N~P*yRlABv-PzS2J=*&1#T-n zyEUhiroL{!C_7xS7dvwq!cLg-5y6T?|vF1K^8^2-jFcF>A@s*Vn5@b z31}H+|AQFLf9czorAeZ)vajNHO`DpbWt~s_!=YPHs{~0DwVN5P&t>vUtbU_a>d^W! 
zU)TPf)EzyA?NQI^?l7h&t_S2O%fnn3sY;U;!l;tFn>b0Rcs(qZ-L@LDXR|EvQCo6Y zjg!& zOdROZ%6K<&0aZ6WIXY1mw{cIB*>T@1kXJDB!S_1 z_P`dE`#y zjG7J?8#rc_aI^;heF~xuaKesD7MJB*R3j{)cMQzi^Y$Y^5tc%lJfrAF)xQWIcs*xB zo}wS8Yj!%63Y(3wl=+9+81d@=^OEV(ifI=_#jD$(uPpjQ$M$zT_k7W2ZExGM^O>h_ zcF#Z(gJrV#C!?U*8cmhsgRmbrcPz^EiAG}cY7q47lkM?kdsMEk+M@LW_J#z-31oR9 znR=IEW(qIqNt8c7We7}_Gown{{s}06uf4m=Z)mfqUunljE1azQhiOcS0$D_M_rexZ z(xbvcy}3k(-c1@#LXZ|I)kLpT!2o5@NXp zujuTXA{v|Ow;7Ju@yXQ|)_#KZrFnxvi7j{q@GTp!`&;QxC=uUf@LnD&oDCo3?MN7< zG0G+FIMAb(aglSVdd6tVM{d64g>79;snu+(?~(OzQP1L}TCdaS1f)UWW%a$);jeHj za)`kgX*Z^rbGI9H={ou4ITrp-K^Z|bT5wnH^2(M}hC))J;~&A=l)d>wfHJ(Y8c*X& zfufCL#cAC~un@(nLeED~BqV~p*C*P!k*u2?SKe@fA(W2B_W6q?1^qaQJgtM1c&%`i z1pg4?i!EP8;l=gYf}Ca+8QL(pHInz`DG|SbDEdEycJP2E!tYCD&adtY*%`O zF3E}_kT2Y_ATqaG8fmSgBl#!kAlyUO{Wk2l!n)$wt*$*Wt`Z_UKC*CEJNIVK`z1Pf zju&lb#ETujnJ=|O^frcP59+K$N1;))a#-_QjA*RylHX3EA0mi7C%aMdJT%l258(sT zb1WM}EfG1Hn`_bWBT=HX#20D$`E$zrex6YqD0H`4-)H_Ms0_`Vb+N8qkP}(x`vu+q zyog$L+lSSq0DH}TiVwENu3#A7>MFde=zP+ri8W55R$H-5I}V9srP)>=jzAwSr$kHu zPCCHi1Y`Br-yGX(_LjM!h!ap~ zQh|D!3sizL)Xyn|YwASWr1)zdL)cyi>t~mx)K|mT#$>9BHW0%O6Rb~X8Vrg~c|6r5 zR*N5(y*%%H4h_@pJ63#nPGnGldaLCb*G@;Q5J|C*SF6J8W?f}BMqm%8Qeb6R(V8OC zfELuA?8(9m=;oF`YxAn1;z3mG&?<;)sTZs6uJF%G_}8g{*)TU$#!@QZf&!vP(Sx1s zM&yqm-!ua2+|i57BcpIj3rOKj#@GBlJWji_q8udinYV?hE$MqhrGch(w8^wM3HIoo zEP5r@FV$v;;5umdAhSsLdxuzIfiV>vAJD(XKWBxjM+B=i9Y6Py)atC6m-yL;`^O?; zsljwko_>;;sqFkcVm7a&z_ZTpP8to=i7@OPJn`*v?LmZlVJJcz7gx&7J%t&I`Hnyx zSSl}rL6`o87b~FbYHM0L$;OIo%w!vDK8pr&>>qJHUbuX4MlQ)A!5s&2BxIoHKU=IR z(73ax3ud4vRrfz-+;u^@8_(ypWt5&v@5mL@yavy&bnm?XY!0%HZOTzRxx{sDR2yXH zI2PQDVkJ#|u4IB*A~`16h&|2_P|#ToXnu8wW4mv2)S_~HxwPBnAdadllRZMU4IG0H zR3}^dX>#;Z?~V-?-!Y4DLH4AylyQ*72WkyrrJLw&C5IDXLBGN`l;(#hxr zp^daebdla$%b4-LI20mF(gL}$5UP(7H3mLny{@BRNn}ZO~J;G3s~w=6E_$%uRhqZEe@| zaGK|B78n-k#!sRanBBE@%P@V{q-^7o$C;P=u{~&PF~G1#)tHsbH&jG)>?_ZYgz!1( zREQuywQ3Y0mPHXU@XNa@yhRZc)l?OV6*3Q-CY4YFEIY*Vid^gPeWSsRqXrjGO6Rgn zeoJr@O+AX_mdl02j;0tT=re0C#K;*PjEuvz;8N}d<6VkC!}Gq0%DxsgPeQ-uF{yuA zZ&CY>iC~RI+4NrwA9jWr&Na-iJAtW05ew$^o5Sfz#$?K(2 zXhg?Q-r-b;mcUmSWUrhRJ=O1HsYTKkNs2`w3H20++zC7+wE`GdGq-y@T?6uo;xz*ntHuI3LLtrcR;OMm9> z_a2lR{!`srk2jS>k<~;kMV*?9`j^sDcxid92sVCiOR;S7vkTJ8ovrk1i%LLPzbzk! 
z)qA4i@7CfA~dAG(UL1fd3;PWUx!wRq|U78XWTnXQZ zu|JkVv@a7jRRq=33S=79!0a6({?D)NZKl)077ismBygEUgOWt5jl{945l6rmgJS{> z#U{(TrWN+s{{B7e{{6Cn4*F@!fZ8bWcpTYQ&rY61_eVB*m}EUwX_6k9OnD*C6h|!A z`7rA9M)0^cd+NWwj0YPBhVfFY=d~#{=UFl1J&gebT=}eJ9yjs?Uc4*uuwkP;B1M}7 z54rJ+vw=(v4ecw9Yf};^thvzHL(LCm>73*0?ZW$Dm;6Vo z9Dr)57X$CDO>KVmWCZu7R{k&rW<;&mqa*O-!djGZSs9D>lQZzlOFUUrMY=_GRh(Mz z;mFq9x}X$Wl~Z@ANXy&L??J?D>M+DqsW7C~sH>Pap-jlGduzB|=6H}+L52yL6gqh> zxPhS_Bcg027-(Sw1Zag0@X`7^MmO03?Tu3V0r)&K&rIVyRlSiCcZhE;U-C3$e zd?3b$mo1(pYrdN`L=XpV!h@=hBgli~eu_TaG6Xls!1T+CXC?lBY zZ%lS*gq7tz$90#XQVkB~<@ zqf-hSID-rc+2rQ1=bpyOHrKAyb+HQVY!+pxZW_5E-BTU^ky%X-V-~+ta zh3kb`)9r4kJo&BOWQ+~DYf3nKO5rZn&*6-X-udf4!uxz~V0q2Yi_HeYJg?o0Op&4C zD2tmN!@E=qF!}>T@xLCt`TwFG?%S=wU&RcU_s=i{_F1iteNzYwvyfxsjevEJ5Q~LQ zPkGU;U?7h~p8Zijgp=tei2GF<*Cr|6rBbgVaiY8EJtKzxpY3}FW78>#wuQ(H>d);P zWr$cE32uwC3--c%ni=u-g&dC8zU?{U9bX&PYQj0U8LyzvwQpMK9qO zZYk_mm|YM3jKG#HwBAhEH)VlWa{R;3C_vpTGAdfDQN>=H zyyopfn%APg6NJDZVKN;QCJsMC*nC2mz0SwtM2SkzU|$$yFUAbFqQ|CJi#VgB!pR=O zl9?nxj^$S=k|aAo18vBnJr-0ZzBnm>W1vXRAlMGAAw;?e!>syPlH!o7VVK-`{qfwn zw{8xa{vISD68)_p5#=@=wnB4a|BIrndJT7b&eEmmC!I2eP4a7Pty5pXz=FY1?IxVDI^GmFxDCsl{GoA6}Yv zWG%Ih6ZcA@=i%7!$hE(W#W;tFp!l;j!PSh458Ev4pALB8J_J$}OIy%iNr&g?%BX#T z*V#X)Mk^Y`N8xi-A!&>c_G5@|twmo{{RQ>5#KNG7rm0qP;%(8WN#vGI!uynf;r08Z z3Gv9bvH!}yHN+VrHdNCu7GoyBG#4=j>U)+P2_bz$zu<-fYVV0TZ{ zI+L)kt-Z8)YPij?Lh$Sa9(2!l>P{x#X7@>m*QS9Kz?@dRA+haEq2;2Jn>o^<6^2v+ zcwR{h%J}a8>+LS1s_MRlfeR=g-7QLkbR(s7$DtAF?na~q4tVGWNd@Un={R(EDJ3Oc zB8}d4p65UAxBI?h+z zic6L~3kFFVVwU z9vDbz0Ff2GQ6^jiAYSda1#YHsF z3yUYK;Wrqc{4hW$L>UcSo5GQ1wD=^_U3BS>P8=FZ-_|^@qOWQ zVzBf=Rs3Qf6W-MedA(62gH=LEVb^Q!7FBxwaU)`D%jB5V{FBmO_wf7T7oo#^RR*p7 z@10gqP-yA>!_rgG1APK9n{BzW|bu;d4-pw?jc#KH8e6Ronf1O~oBUa6z2pyQfa5LtggyTg!yinlR*iNZC?rQ+NZ) zP8HPUQ@kZHJ{&*Mt&{@teI8!326~7G1$O)7HirsHA)2r>ib<0o7D65c)+e*(h~K)o z(w!C>?M#C-%SqvwksruPN2+MMcugH}Wr|3#>x+FlUZP!^Cg{8(fyC#JG+c9a@wHx2S=A(hLjwjf=?VGd;Vj zhVDY}Xm6D=^!*~oMbV@6S}o9De1vKL2;TEWJI^i;k76a z#DZ7!y3QgVwB#B^6cZYe&EiD#YS`0%FQd)uX?%T8#szh}#wd=k!J4xp+bQfn?EG27 z+s+nJx^sE~sv83e3@#4M4w1ktkkfcHpbQd7N%Yo-Dh?dWaCFxXkq)zaPq zNeF0&?1U9`jeD+Q3`1gu?J=U*f-Ez}3<&B4A4n3z&E`bL3+SL9$|ezucTPritf$4M z^G%ZgQ`)EmVy{C7B$2czcYczTe;5)t@k?>{k$??04Jb6&-(kPAzDIljhPxE~%PpKGo%Z z@F$&Jnaq9cEa2_lWadGkspJ^y*%)XleefMhAuU4XYo^An$7C1pWXv2v_jOM4kS6_l z5As71TQFjXe3GO=pvT(*#=LtmfW?V3O8^Ze*8Tzu&5)@pib6m9#fNGxdN(G!_uXvs zCiBjvkL%7UerOtBhQcYAqnd*47lZ1-a|3lEW(v9N@iAskCTvp_&FaY8@olSudqN_% z6#ec6E8>X9>G<7Nz09tmdd@`E4(^XmN1wjxCE%`Jx>J?&UuiV0F?-9(ioCm-{=1Q` z-i(LLF~mFVU7vLNNxW1L`H-8dVF3(3a)K<1OJiBaE8q-cptJ;Y8?Yf-uSz{j#T&j^c6{p0ukXLjF~6|I3Gn`-VBte`+bSs?zbC*M zpMn}z2(32vYGc2`Fxl}4vwpsr@p~ps+?DfNR?-)J=)gtZXHdHni2+6R#g-tmd~j2geG|*P8vJJ}Z;;v!d*I4P792rSL%eH3EAuGDBjf zyABic$VxBDy1Q};dq>B$dH2N$?Me(TLlH$Tz0tNR^Bh{0N(hrqd*RMFE`G~aQgQ|& zZ(YFFFx4I0KXJW<_w`g3%aD2#uS}krZnMkZg=J;-de>QWw1P*Mt)wJd{84Fm^(G-# zx#(!pj`GuZyf(ZzzcZkp!!rnN2|x5T2{5I(u6!diKSF(?@?OrmQT>}cF)=^HMUS|c z&~dh=+wZBBoK)QE*!PJaIQNrLrWS=Sa05PzBnq#}W4O7q)bhX0Vc*2*7qh|0o0gK! 
z1RjPh+`#PPn_~8^m@&rEP=1}<=6G@tRj$_ho*+L)h5fm+v!HiJ->};+`jzGK(~GHk zhYH(#q4q4kHX4ZJ=rn}yPUfC!ec^>d@fwjOAy}HtP08{W9V7crz4S<`XXRZ2dD4cZ zdQ9pN(T?E)Q?Bgob0CYbRx}1fF-g}wgb^X$i1b9t>fytCYx7R7#Wngtps|cF75*_4YxyX@Wa0fj_G$LZgDP6|38IMjEl-VuEgdqlSVHZnt7v{mg(xdJ&T zKAtU%ETL_2;3pLY1dD25V}PtyJwWGxka(qwstcwzJXyo+9r@2NG2ueoOrxWm_~?10 zoFne5+w{h?*%H-L8H=80BqYcbwqYg7Rlbi7-q2$i*TQ`APti*SoPJKmhIYMlh2d6q zXWL*~lHS7NtssF!G7kxarY&Y{p2BO`R`lw_3txVVaeA%HJiOL1M(HyHAA2vK+0|t9 z=7O5}K{+$ml|uy2H2Da%-Ha$CVq&|;T(hyK8aZe_dfHZVX?;b8*emWs7~>r3XiKk= z&oqb40xcs915R>I=57!+Wsx5*#hY5B)Z@F5r56$XdU{x4+Y93j_hk6=(UCS-{R-EGH9GnjRUSll&;Y`XziGA67#*o@&8DJKDFyQ(aM- zal2c8Y-zLB!;eFYaP46OFAN(sA^3*RbJeSrrAOoSxL>0(W8nLe82j@FnZi?L7n(dm z<@{o-Na1#Sv~o(Zuyd>orKBJwje*85_OM;XzY5zKSQ;tgI!J z3);!>DC$;Rx9~ioSnJSoj8xQeS-7RGyIaLuCMe%TMC$x+UIO0_KDiGwn%^E*bz@F7 ze5LUc#Soy}ckI?Pi9%VEJrv7~__)BUWuwJ>n#n^}*fI3*ypFy7<1kfTx|0FuZ^O zy+2VbQ;HUJvnJ_`FaKVTH*Kfee~_kq<9FKYcX!$k@>!UCZNmGH!VV)1KU=v64I^EpHe>K^7M-iP-w3-t2z(vKl?^=?jZNvkY3 zpyJ;VKYl=x<{CUHG+vh#28HYI{fwIPV(=p4!{STFX3DF7e%H`g#i~Y4q;}xxrM~u- zyLmv(-+*ZOz@I|{+zCN9H$EKgSL-pV4-nZ{AbzI9C-6}2E#qb^s;*A1il*?h=78^x zUVnUwNgrLU&f**Wf#bnOs@2W@!co(e&@+l72gIe&GD2SLZ-_P&5eHdwTqdLzJf0k7 zCdvIdMD+I?L9!Fiko2^vzPiZPbuLdQigUTSuRmU;)W+>k+^wk3FS5j6l0|AJZ1s61 zC7Y)vzNB!I;~ANbP>aM@C&+hD4oP&-&MXeV>HOQn`K*Y(*?4ENC)?D_Eu7`dIwUu{ za`)u)>yAfy8Q)nlW5p~E_r6^Rn|ijX^3zyxl>8H zZ~vdIc6l0Qo}7Ri7D7hlHE=87I(^LmQJueTSN{e$cT}%L<-WS!&8_-s`ymO<__H4I zbH>7CySvMeW|iv=f%R3@Rymumd-u?pn5crhADT(%9NTS_a_S(T7;Cn@;Td7UdTIZ_ z0-K~MSRmXbvOz92MLFqFy(ZOkjkmie1EPBb8yos5;eX!_%7>A91n_{o%YsKQ!WD~m zoJ94xY$(Wg1Hx{x_YR?YME&-bRWV(b)|77MBzZK zDB~Wg{_I}rPpvvDzd5{(|C@vm?B|qgukjejlt5OP$vizP>rqk-&HeOIOG+jvb`24& zuK?%5LZpU}TW3lDAOScEmVku2Wg03(-i5lHVaKI_jTLEls~}-3bZuz!&#${o?oI`z znf2g6KF!4lJb_V`iwoM(PiRPq^(`4M{_bJ~b4TumJQ4LX?OX9u9rK`Rgl>Fth}ii6 z5yI&+nlFtucJrY4mMbb8Na$Qv(Stljk>1@#ip@0t)*+ZAhO+UH;$wEVT#^@M1ae5B z#GrIo~3Prt} ztbVX`@Mhpaxq`!^h0L{c$RYJeP_85@7B%V%x=%Y6WbV=O412vq){W$lwY&!Y2-Az~ zGkXp6#7@A8AQJztcGUtEMX9Z~M?U(2SeB?KA_9w2BH|c*ACgTkxK3THBL|v-s~2j} zPHuED$_ZGo^aRa z8k`b$?r_Fu8;r>tCt3L-6)a-1_P)_qzSmGMySA6Q~`p=jFm?{m$Bd5b)Wc(b#Jr1vzYNJMccnNqE4Or)?K@w51J&n%g*>2{ddjN3C2 zuz=-o3KEy&H$JQJ?B$j`Ns&lu{CEBr(h(taDh@Y-PQ=Kt!A>?xr4G#jL8J1G(P-{J z&7yGQ)MI~4?e`)~P1lDWf*j$`&rsN9%I=o*&3@|&D`@2@u*Q`qYk)Kb??7{Qcilr8 z0>Mcl=x`sg+vE{-M7U5HF|OQJ!9Lm5pX885Ks#IjeWuf=d9%De~{l|LROxbWzax5k=a#OHcmb zm|XY;b);a$WwncM=+C(xePieKnywzn=nSStGRPW9)((FLItCYZJbjKnW2gXiSZh3s z^*KS;cDjzyU0JYUG|j6rWM|Rx%o?`w#~oj7^*;r1E|y06JD946rH<;8&m5I~l6NL7 zm6=8LNt--aO9iu3ZdtHN^eTw&u8v!FxzBb_HuL!_U%p~gj;F3Iu{z6ri!l^aO5>`w zB7ijEWY<-+sBs+R4ZfXAlx$r&@BW81(bGJFZ(ifpOV&K{ zrAJ^<-E~2zi7oL`U}lN}#^me_s4qi+-mM4z`5B1V7?_>S-9HEOvk{PuI|6{HjLk_; z!b50dNPX^XG^g+h?^Cyu&P?7{l^bnXVUnNO2kdD73FyQ9aa*t zZR8do6>OQo$zmMotB!hYznXHSVS=M$-mp!Vtt+#sWy9NiQW$090mX_1NKI={cXHP_ z!l{+wGz}7TjortW9$glgP&ti+@JSQ2(Huz3_V^Ziomfm$ece#U>Lb0b{$3|;j+U=a zElZqV>t{{q6duN|V8jE?MjB*~Bf@Y&H2711y`q@_W#bOaH4x0|p@`E&_v8v{iJPa{ z)*PEvZMz>cGXZOl4P~9yD`|Yx%EeBDXLhl&1hdwB;VzZU1QiKU8j!ZW51HPeePXNH z#0S3Hm+Cc@e!O^a{n(QHji(zo^v@B?J@KSUO)CyngOQ2#?Lx3lM|^dRm{#wbhpi|l z)+tiUebS!fhD*O9Xd507$s82-OTk&qF5hpzn&05-X;XeaGcZ1ilq7D6mKS*svd9;w zsR~4gGm^OFX+)9OhSvd(v;|gZSvlLz2|lZv9h*-)3pmevdK&HQhZ3H5*+9MPcHxLZ zjN3h?COyoc)}TjTu!Tu#z08P@v^PegqF~OB=MkX@0zN??qUwc$x{xfpEl;3S7>mo3&zP;H}k(gKjEhFqvkeh5)`+M5h(U+1r=)HC4_Vt7gYe0oQ zs|IZ@J1u&r6KVd`xd$^9r;eY>_pS`ihZ7mBI>CmE`fsKz!RU5z^GvBhu*Sjw<#XDO zbOj4WBATV^k0Rs<5M@9NPCoPr@C=$76F5Rhn#x%0 znn{>gf+6IE9EbFxE24}^!Mr-t)`!ntF84MPyo~%Ru;ytaCZ+ky{Q{wluh>XC2_nyC<(c5!lVc z+JoC^A$-0|^%EKlOQr7(C8=t|F*elVMT^^`Z1oQu(5oG+=|B$nBwm!KXlmm2TG{8O zOmTX&K+3nbsf 
z&f~}Up`OX>ud>Nvm-98`$D`eu`o0CN>7h<4=hR0Uo~SIL;WQ+X2uYR8c|~-4G@~CZ zVPE(LoNW%i0gSO9p!pbBS?lw@HYeBl;fuZIl1-AeFDH;dEwcs$8j>9rmPQ_+t7 zR>)O+g~3erG6dH3!ml+S%V34;&gzx7oU*0^`3_`&8VhCY*@wvH6m&wlMY05EQpl|T z5{^UqQ0SR5MF@8ZN<8&1#vmg2y*CK8*K70Pk0fDV2Q2sJ@^Z(HpG>Nm0|+A!Vb1|g zJ1$eAfZ_)u_w*<=eXvOlDh3VTI*c*@)fX8u@0R%EeWT=aq2G>>xtYENjqiPP%>rJ6 zhpXzd;LtS>e$F82M{%h3+bUH&kntoeH0<5j*)q0{w^DWE!>5Srj*uk{6Uit$|A3we zAvBK-x53%x+k^BaTk$x(B2i389Q7sW_&(Ny^s%+0Iwi%WbvQ2)SB|s8-6y5AqS>w^ z$KBoiBeyv!z~X3tM)i!7lT(3C$@E+4*aAUq%SBL-moiGQr#~ZAE9Xh1;QVLdOEnjc zQ4ikch4G?XkFd>x-+mc+bb$Ay5+Htyd(8TT*XLnx6c5KLtQ(1S-_&!??MiIA8zE5wXX-pbIKVwT`r zX{k~)QAhrcCefBa-!=JahIP?>g8z<)=sc@v!W>A~vZS}RSiK86^L;@FN2{Dkl8v}I zfq&i|JBMZ1QI+f|i=;d%=lAEJ5}kdwqhvlfaKhwEmc>|*Vd3oq+3Wcs)JbwCit&yb zTIGa4%bxlE`O>x2vkqs}ZYoTijG^K!{kqT%Eq*!Xv4fR+c#_c(LW(wUYJ$E;L0k7~ zAs6-Lx?Bx>S*>@l!=&UBcA-eg(IFpVgPR1aAFTUe6wxL~pZ^pk%#yLA?zOd06no?7 zGxDy(#_78$l8dh6YIhJ6qD7o8xqO?Ijr?}}$0A@I3v7XWcn77C3abL*Btkm{vjjl4 zxzvO++5YS-;gu~{j*RmlC!u8#VTfyRQBJaY2S(u#fGN0qYvz3RJRgdnnl{0ogR6HF zpClZX8sh;Vp@CuQY`$8;%fl`?C6kr{4W4SzWWtgD2IuC^olsMf{`qSeX{biDf41)0 z=1UzOb;PS@&wkzbup(aH+k=hIy{E|iwxSL?mRF2HIK*Qcy#sm|FQpUZ90EAILBk1{ zb3gUuCySN^APZm>zbYii-w^tD;g~hUr1rN4&l3pyEa!zBO$2_Vbsd4=-1DlXRHGNs z&A^RRC))ZDi%ltT@OCU)%(oF7{x!G;Bjvzu?YYbW6N!M1jtvONS!tf$ia_7hee-^mdpX+y&&*x1-|exrak7Zw)L($V1o z6J%A9L&PN6e`cveYBW?yN$K%JWFCLX1Rf9zvIczT+jg9Pn=rjCX3f2v$U5yPY3%QG25p2LCTJjlao`SQH%9BHIS&N zR9|cnWrLMWRfbNHEL9*q!*Dcd6>Jg5?wRPJTX{#3=s}3$-WqBqtkfW2HlM_?E1WR( zb^=@g1bel$wIzCY(mgXf>nW~>1~zw317aG>!+!*Ih`$z^>Gw4CrwET z1^)4Ca9D(=up1!g)Z-?jozrA_dP8N!Ej-0$EEx!yLAt~Um%jF-GGBn%>SY^`UO_FQ zM1fw4=Ri2X^lq6yIFTj%2`_jK!e=bBAkB^VszFJ>0j5v*6AAFM5?bBRd$N`Sx4llb zlKl+T9sf-&MIb-XH=6>YsfqUBr;sDX&**`dpD*Ly#sMZ$0d&lUnp+X-Zg3=!%n!s# zMS^LSgY@7$QvSy&Ul`AD04~X*uvB*<jeZ>BKQ!bFP zJ@EhiJ8j}F!+*G{+yN>{g?WI&gJYn|qZjVdBqUSpV|^#Ib3+4vn2BaJ>45Q+u%<2JL3l4Na(!SdjtR*4BgyVXRaI zDudy_V3k`Bwi4sCQZtB$Mf@+9aw1~hB1`rl|M!SZ zUz6w!AihRwMFG4q-=^|8onc-=h2+2goKS&i%T}O+lWI)_1ePMcAe%#pazU!clYc&o z1?rTx!2c+mv7?>dFyy~q(yB@skxTE(oFE?jucp;w`Ho+KblN(Uiz1pQ;LaO79|PZZ zkU9o5EF+d?if*C@yT2?tVhXFNrIiF!Z-6uP)mZSfeX-vql)OS{LZHOM$w@aZ5R!rT zgcgSn4w8QB9+RJ+A7Sy0WQmZ*_9q_v_~)JQgcqR+91Qsg-o~l@+RCZp-ir~8kugt1 zgguS^<7HBzKYRclX?A0PcE2ondvB*d=b9BLB!XUFpMAB~{CJrQ2JP;VbgWwdz?|Ha zk`Tn8a}RpA$@H>1h)#U2(%AVP)U&8keR=PpH19cF!)6aO9}<)DH%(_B0F!%v`Skbw zmXx^076bxuedEP=_)7fd+lIHJM0MFl^2AXds8A3V7Dm@@1v384>z zHAdT-@EI1w1NhVP7HV-n@0xH=X>njZBMJ~iL_~mxS}<=tx@@)7UwXUh4~%3mBz_e4 z7Y4kS>X?aIoK`JVs~#n;A7j&Wm4`V$vIe(Xr1Ny<9z8u2)0cqY5^ZQ%Dm>katVCN_DEn!;K zP_3_s@70Pa(v@MWLx4}xq_UCYF$$4Mv zpkl9sXMPiK^9@o@xz++#7ChBl-uhnM2QaE-XYvHbq9{Bv*a1O7QzQurIf_Nc7zl%! 
z+Z6r~_-1;o-rOL&j}rjZ2)BEX`kMqO7h+C^dVPI;`(%84+!`3s3KZHuKZ5o|Xib2P z28iZqr14!aVrB-o9RL(3h_#v*G&fVM#RbM*{M3N`_tkT_f#R(IX7~<7pNOs7gCFJi z$&c=)+oKkdnCK@WBmjQ?+Wwmzti&d3Bok=7kK|}B+V~6TaZ51i2U`-qfYO}^CQA%3 zy8RZQgk{Ma$aCCFq6eOYJaqx3MV_AWg9{+Mc?y0idHcd5UI!-N0GGL%H<*~1&{U>D z%-iCzw~7ijXaog{+0(@M_$~#etR67TC5R<$2-o?IFEz=jRymP zB)m-_A~-;xTWI!VHNM}|O$6U#2A=tWqh4jw6^vu1P=bGWQEIoD?r6aE3t?kHw2DRr`yB z#kpqB{W4IoWOQg?pbE5mqF7xXLTc~4Sqs4}1H}ytz;j4Vfm)8!*5;-j;Cn#>o7y@$ z*6UQfu?0Qh6ml6Tq%-rn$t@p<`l_Il+y}mL9S;vr8SopsU)Neid-f9R>Y+jL8PT8BI9m$x62b8?))Iq$dr8mkp>T1s1zc#2kRgIR3%pWn|B;XnP|{68+^ z|GGlc|F_6fg42&)QsbUOFHM9FcCc^!R~X!^40!1Dt`-Ce<1~lQ_~T^0GAsioGV1Be zY{WhzLX80BL~^XN--Us*2lRX9!4Cy`vEaIpnNP+*_&FDi$UOm{lnxdhG(deOP#bCt zGzxvR@l<7;R~*Fvn0FX_)e;0*HrY;FSYD z{_gG$jE1xd7*jYtFWqFAcS&^0^twO|g{xuXKk$RaS_L(=pWu#FxTpf9cx1jw*gd-h z^M^4o^fqoLC?R$MhCqDSIGFRM@mON8>XezlLmv-PXG>4f$&1K8*^E%1$_E8jcz}+9 z0Q*5L55S})C~p8w3CV diff --git a/docs/regression_example_online.png b/docs/regression_example_online.png index 06f66afd70ac5b19c9e367b1ccd0146fc8c6753c..2f30ee5f2edf7dc0ef52b70aa6aa0599ee28f7b5 100644 GIT binary patch literal 28716 zcmb5VRZv|`7cGjryAvFOySoN=cPF?z1a}BdaQEQuPH=Y!8rm>pt97-PIa}G=u`;kTFwuRnba8QT=3!*C{eM2dVDDtXDCnd5 z6}SkjgOrvt2ne##=L@t{u*3=k#QxVeF=17Y%(E=lG~I=#VZn#LS1VfX-|7a?Ny$M) zzK~s{~%6+Ye*u+ELDO_QjTm;Y`~z#kgEj&B_$u&;5gx_E70)s&s>QK z`*S`ye*VNa#W(V|*4h2SyZfNy0CTX84n9JRG+qjd`g=L_mv||1v~OSIMW}&45g~3(Sk01Sh;Ki1fA})S$zF>C&w0uz zkP|0KJ9C(Rb!{b2Uu--)mNe(eZXh(s`KB z-C@Nn^==$iirlI%fkH;J!-KiTKI7*tA~4pMi4bi4Kiwa1_Fw@$^25ZQd#JJ8`#4aD zcn!l3{LhC47}(g(d-0Ojw{r^U5n}g`JZfsccwE1#l*pUipRVw|Kkmxr2xJ{(d&jP> zt`Z3nN06)Wy~M&?IC6T?_57Et>oOzm!Ius01o_*b zo9IV=e!eJ9t5||S$7LUULvyq9zhn&-pGSvS-gB;O-+L{QJjL=b4ntt=xxWBUCcQtE zGZl$R3&Nbp|M7bB&?1O~6y|xqV(~cxCPUG^2RDxIPY3Q7H0Q8-OEa!nDJfXzoe%o@ zUPlzMeAiN;$i#mu4k+SpG2{5IVHi@Qs(sI_L40=cH@{%OU9CG$6?vF~V*{fAHy(<{ zMIhu!k|g#`G8;>&HS7)SY!lD%KIi^imMH^ZD4HpQCUN{tYbU(CytQj*%V%zpk&!L$ z&nLh5y*U`W9*wKB;>2cCZB`p1+&6twou~Ly*VlD_oJSP8#1$jy0J$+j2SijPZPnIa z?+XgN7#8CJm|}&TuHhxO2_FUiJ5?SY9uOQsKR(e=gzXFe58mcYKi>3W*Y3Y(ZmjO- zx*(bIR6)?lz=XFiO?@Lhq9yXZRiYc)>A0CZX@dR1=>Igv_q3Pzx#&s-5=C}Dz%f3D zwtqdwcyJ*2!Hq2N_@(=Kq5A$Yz0rEzr7XBI=Nf` z1NNB^>mIu?eAj=-lwn?Q0JNiB2>J;?7LaFuZejnw(JkGNB)GA$QP*jN0s{kM>&hdI z%aJq+i(dM!2Kb^_0;s9ayrs7{?=X=!9aW|SSatV1&*0#o&2mj%yHETl4wAo+?iNVc zw;fRmJRgz?JZ=T4B8z|iA6IG!emjlXgzI0D%5sw}{wES)Vq*GScyI2OS5&Q@cpKN^ z)M{3~Sj@3mSz8z6=7NYTJsU5hjgXjVVf7mRyuNl)z3Xx3E+mwCh7bD=uZbHwchrz6 zUvih2^E&W+T0d0@P3$x6)T+BE_$j}W8Vn!^fS)ysRg88kICod;N#nkC*Nbliol`pVzBlZ=b0uRwDppa)pC{uf=a5k>iSfLCGyI zkBp5)a&mG)dVKS)52D(9N3dT!f1^N#+F&a?3ukgRWfq0&;J^Nr@Gk7kG@6+lv)4-4 zLa>V6ZA4sIWwlj&Q%u}IZ|YMctEGhtK)}k;^5IOu6M)m5OAlOG@|p4ncEk1c^+hPs zEU`hZCg&4Ke19v6&_zJH?MmpYnDtjvOrbd%@Jr|Lz$GY#KFhRy)`ULgO62kF!aqkr z;3@bskSh(kvw?Vw3?uRi2cnN>K{Qc{TqtEu!eTtPiY)y&Pii(8%0KDeAyYSwe?b)V zIRJ&ob1%N*aVLuJ?RJj9?{WKe2fKSK+xvWbCyG)2ZkikowomV$qos}A(EuAO54rHH zsEyT`m{N}$6KZy+kXNM)ay;uKR}k8u01|2hgr+iobi=S+bAeSDmefzAJx>C* zko<;VV+@OSW&X^!J~p`>$D_$JE0J0VxijI&b-eOc{|6A28KEQonNw^L>Jb9EvHZNe zfY{hrRb&d2KNAy04GnAs&M&%dF==+n7jdS+C_}rB9JZ^$R_SF#(%oc6X$Vd@%WRlt zf_|M?J6))~;`94K`ZeGH`Un)sKtx4FU3u`~`Z{0O(v6W4AkEFsLxjT|$qpSf)Rs|< zFJBYbXnVB`+y|R!%`Yt#SmCLusX?da(yx%En4v~eOe5AN=)t%69L4pa#d8g?8Odv{?qOcCu%W@F`Qhy7`MhjIf1ODwcPUU02@@cOBYsRS zhl(6_>oDPmjWP_a!Bp8Eq=N;cx&$1_tM@Qu(q2^?iDHT>D*Zs(X?*gAs@DDVtn3VK z=Y68hxs0V|qx5ghmTjZC0R1%Wetg`GibrcH8h4UfYN^^C-;9h|1}0!*2`Ph-;OTy&=4!F<++x0MK6PZ z1V1?q*jiPG+}cZbAVie_?+l$?f{!DB@V*5 z3mNyrx-)>wyPXez!MBk8AHe~xz!?$}B1Ve(ex2Iw0BlG1hfhVk0wpo@wpk6KJ64Rr z@xi@mIo-b!F7!QzX*>vCK3;zDx-+m^Oq#yGK0(h1SS+2im(vzNkXFbRpR+mp_2 
z4e~h1=-0bqku#Am9_8k>N#DHQr{}&gh^e6~EF%LC%*w`$CkU{+ZBTSV;pvyeq~F7! zLu*-e`tLPaS9a_sCHX!C0D!02_#IQjz?U6c*P++1+x~Ml4&9>ejp2*30kT4F=D|k< z5X;f77a`3$Q&@CLS&-m%i<|w4OvfSIbzuH0b(#V4tWBwqJr1nX-?walAt!0l4FODk z3fNq`HN;G| zZ@bkvI5?EGv_{tKyC*oipC-V)E@GG56^yj795=WQ6TbU?d;V~xc*7D`Vxc@V1?I-* zY0Lo1aCayM0go+wra-dZs2}RQWf#fs{U+yT%KtraH%_qYg}-^-3Eq9(5n8AjMO{@D zC2Eihh-(Kff^lFrdnotYPw{4gg>@{v;EjrzEG7x$x&L7hd7m<0Utc2~cJk_P@`u=I zlg~C3u2y}5Oc%iNw3=*iKd<^_JwJ@tk4wHJQRs`1wKeT0HR0TFLEn7&`v3ya59G(o ze&nZPjc&Mb{*g}_os^vHxc|%I`t@e2fAJZf+mz6u}^ze?3gxfAp?4TDOx) z`G1ZZ&?j+LbH$*f;oATQDJm>19M)6(6+LBEK!QS1jL&8JYt5RO&;w9MNXNbaH1P{- zG+0u$hzl>`B}*ZtVbZF*zylOd{|~^IN-HV~YHCmbx&o|8Og{AnDGU=JJk+anM*x8V zux_97&}=tTZrk8|*i~NE_9bJ=s~&LVpUKICb!KDtPi+@I2fWaP3t3Eb2xP45`Cl{u zoC3X>S5Y`~tL*5=1a1cMnY`yINJ_f8vU+-YInj)XHP&C-U~RLGHX6ISFZU7^&?3Tz z{sciGt{zxle6H?eKQS1AL+4Dek+D&p>5u=-N&?32yeb|T>QItJi5K--HXt+J{A?W^ znE{Fj{YqGDR^K?D>ewyGNRBu zly7O2p;ie)F_iaGp@8Ddt|gW*zmyPUt-;8 zeUR{2m(!xiBYeNv2JM=`3oGxGi)Zg%j-TJ|Bs=8svuYvU!(HOczlK_&azT+bNpp2zFvd##+II z@*Jj|nibpt&ccU6qX8P{ID$(g+;a$uz|A=>iL9zng3p1OHg|Av^GK**A;^)^4i7G72=-C>=LdteXs%F_@j5 zUDtiXwc~D4r$zVWK>2g#THo&1ygf}5eDkcdyWQ^0z>dtJ^-B&FaKgz=b&4^r0OVou zeZEyi1_$vc-cL5~Kw;2s2nPra088t><4g$qlUZYL51R^fSZimYF92B&#5py(%ax699BzYAvqwXiUFp9j$!Q&V}`KCcQ`WvK7EgLMc zfzb)%RJT*eW9wI^1_lOh#LAE-+z#0`&uqmRYo$a}pPLB0KbipMfmhFk3=USYnbFiw zd30}5XGw;5sb@m{uoxjWVd`QavcY2kft!&i&vW$SFQ84Ra&9Ri>o@j12FpAX+BL1?bk>>>lWZSRZDVg85p#A74CU;-c5orW2XX^^dqqeK9k5zFC6 zDjuE8a{oILFG&MLySA*UdbJ ze$Z_5(^PVUa{d;yclaJj~)oiGP*EI2Y^ z+Aq2?3SxP_mmK)LtZh1tT3LVq@{oqRM|UYRli*Vp9rAaEe7a@{>dUrYCwiB$yK9gm z@G0Ld!BXY=SD=$sjN2Z&T8xN(qI6vi;q$(|E}O&wh%*jspbkSIV9uyMaSHNLI+ZudA7qkDID;X?d!n;6G^a;x&I+k*x` zr@t3B$F`sNoY2sBQZs`lJvHD}A~(2u7`0X`uPS&8w&&MP{NwfY^(?3IWZ8x{M^sd_ zpQr-+SMfJBHOw5J>k;{3e_;8p#tb$?>`wVhtwxn3YdvQA&U5I;c8ZDLeq8AL+GKR_}*eE=y_@39I%H(p=gpN2Kj zMb*`@;Nju5#dQK9KAy%59yS&e(zulHU6RO^EyuS>4<^U+ILD@A!{d-@ssBGt zZ51+CjJAXbri9DwMXl>R!A_^wmQ<4C*Y=`mWJChEE=H4?+#AFC-e>IQQ`w!vQvd}7 zSer49u`3>6e*qB1ebI5#T?2X%FG)C&zEA-hA_CB?^On1V*VDzdt0y0Rmr(w&|E(T% zB=yGnOc+JeGyamkH$7wb>wo)Va&q#C1um2i>-XROqu;(-|R;i{~XMYF$mr!ol|c7V0@qI+8{FOk9E#>=>W zQAz~Q`i%KcvHS6QAo%Fhb>7*TnVES`&>I*C2DomMfVm^fhPPqCW@TnZ0a&viHEUn*(q}5bL~GIS ze-{V*uw7b%r%?gD0{If>l42+TLFZ!Ls=!~?>lJP$?0v(w$t2E~PE`Jvswt(BBHmp5 z>cL2I6vJl2#B#S%ew*(kUYx7*N}+}>DZ z60v(Z3WCi`EZ;ddooJWzKYT%?WyX-M>Aavm4S90O@^8eKko-vyB$S~%Pq95c&15Mp! z=SZI+A)8nU{~L#`BQG1NBbnA(oqc?TqVW3R*P1NtRzM>CeUAFScau|Nrms*SlVq1G zlp5G|vY)M^GlnHwK_zb-$nO)k`^ulwm+-tt02q8L(D{9`n-K{U=KZ@@)9dTe8+Hn7 zv9P|vx}1m6xXWtcCuQ*>V|C%;1r2PR=)I8AQy={QL(icz6ijovvDFK_)Ud#OYJf7= zTFp^5*{sZ-Y^F|%p+mJ96|rg0i-a3NU@Pfdu-zqp)=g&d;x-lunG*kd zZiBD#uM$Mpn)-Q&Y6J`#7J&7B917yNahizP zVy5*-2K#-LY`!6+%nDg$4l9oaXYe)92ew3xV##FKTyvhF4~In`20&_geUEKO^=kc? 
zwKMb81}ngZ=mtdZtsMjnt_Lv4OMr5@31ZP}2UlHWW93rA0W=EGsL@9Kq0E3ZD6gm} ztE?=nt4l~oK(pb%4-hh}rnh^>Ckl%h5<-}3P<(MTcIQ#Nh^wnDO5CxrsP^fy(w=t{ z(_F$Z_EiaYmFw-i&vIE+jr5g|^ksgk+)Sj-%N0_S})Q8>Lg<6MDFgA92^IbYBh$S)m_hDf!8sShCL)Cq--Wv z?F+L}9|Qoo0)`L8_n~WfGq_7f(I=0(-WR0|+-wMQ#%5W>{S96XcWTtd_4$iav)Y1V z)^F`&FtFP|}5lTR0#Y|7lh6?F{8IEiuZHSR&O zo?P{o43G?rI5C!=rPb93d0|95#2=5)863990@+zv6Er26eMcoZlVv5@(%-&)gOmK# zT3Q}xCHy5JmsA5#Hc;Cwle2h&@)?nHZVvgTT3ECv;NyC0eFtki)9%ZA6@Oe=t4PDH zlDQG+N2M!Pi{7MR0DcmRZ@1O58@mhW-OZDePd9z}9%(e7Cokl$G!(j+4OGIDC1Lu+6+I$8f6((ud-T-@= zL6;!U6CF=)IdsH#%ZIB-T~0}j17}rNfe@|E`h#WkTUwHi{d8;1HWwOVzz!BLa6ia$ zcX~N%ewZnpMHvf};yqNVY3<3P%Dhc@GY8zC&(bAt>wbw`#?V981OFuV0RbP80-R*C zziB!lK&#|>iICFbn?QtYC4-Y)K_h)9!JjvVToWG_Ij)ATp}>?-AOAI!5%4zrJnDxp z8&&Y<1yQW@Q#OA4q!9o`F$T6+RSk`cn?~<+SPeC`?T43<$;tDxyy6r?@OfXPcRVKJ zTyByQxKXMf)ocp|GTuPGO#r@@sJ`!=nh=I%7vOHUp0}N(u~{kS#1_$hzcs90>s;xL z%BiFnOZ9U@i9G@Gxr;9^kU#;svaX081SJB$qq9672%XW6H*|)%hDSqRct(HyFhNp2 zk`OHa@+=l0SCYObMgLP?B_)U6#^*wa%CO0%U-YtBG%X; ziM?&nf@a{~!ZcI`0nEHJ%VJfJXf07}>$L>$7vnGb-9kA4Uv^$o@(Ey?o zibXx;LGoF2`mCEi0v=8fphxoZKz{&4f8DON>~D-&#EShq@>7_t`v+IumrZzE{J|SC zX{bVLOM_48_*p-lUt9!p7{ax<1RTWxR5FP|pkNtQb$$rQl5)|fFIMmyHa175;(9+4 zY?iR0st3@Zru>^?+?UwE^R2G6(8qpp=fh;A`~q7R2g*i(w#EiApUiBXy8`kw8mOps zTtFvPiLHjf|7nKaRmX5#Hz3nLh(~P($;!%lJ(LtY?ivb3A{@|+rplt^pBRU-GM@Oo~B|MwOLxZ1fF9WSJmxJC2u03huRz?etK zAKCh>=E9Hmd*SoUBEtH?f8&$1gZWQLuad!{#e$ut$E-hMIVEvPPk_bC&rF>fI$Nii zt?^LbbJtqK4Fnp4<*xSrf~0P69cn!|Yn*o#n^6s^CS(ua4a2@)`zoaj0}ZBUv~#AD zXU5M~g*L2)Z>k~G0G_#`$R`0~?vISMEV|qguQvJdc4feKdASMN0#v=9&g}g&nno_c zaWnF#u_A>X7XvyShr7L7aQ#IQY1Qq_6SZWQ^&o}OX;FL62!xZXGiTm8q@RBgx&-gi zt**z4&RuGelx{$5r<{o#(iBdr_YwnD!=O4h`?}`xY(!tb)JY+dKc3sBhn78zfcN8Bi|m5aLQL zfD_pQWT1*KWkTl`P?ETjM5e3tOYMH)C`wZJZLBR~O;gvXbULp@KvY?2!?QtA8Rt8;$^%h2>wF_Uf;ujn^=d1u5K5s5pIB@)w)O{w+1!U;lOn}}9y z)I|x zn~1RB3aVy2=^>F752M!ro?5gfI<&SCNWreS)J?s>jpD1R&5b{hn=kt1Q(|?<9rB3& zY{E?C7X|{`3~q|(fygKH#@{&>Ac<5((G!bK;V>+)wA?tgs9nSe$L*D2tL*px)V8)L zYhpbASP7!9j7_a45r`hGLrYSm3SR3``Qj%S*%vBLrRc++GBHq~4kv4&V6+qOR;z#m54+2WXt{}i zNIO6zY{w$aaKU7`N&231euv#|@k&U#Gc`X5*X&CR@qKVyqdp)TL@lUy*sz?a>UG!p z_2f+r@L(90(#) zN@cXzsjBJn{>B%o4cLeo(~vEim^fzlBk+QhjKd2|nH&D7$90vm`MWC_vaGoY}E za5>Ph5ppZNnN8xy{G#+?a#aaEQXRijH;PMsPvXKh`wRjLT!EdQmOpfi? z+ME&mbz&t<@0!#{P<0Vn1|2HvARxDC%Ul}lBu}D4on;I-E-(Z_8k0Ecn+HnnFJ5MFijUF zR48&kPo4q|ku(_AqWRPp8x#`^e#p5-S?AYly;@TQXOWW{&syT;TJC5^y8X!CiD-x? zMylm%iCG_dQkDBir=HBRb6E*Yha}XoI%d-(qR#EH=`$Daod(4mhhW~=(SP$;rAIFM z4!y)}(7RC>fEIR5mQaQP9wds@m!6JSUXE0Dol{t{LL=KD-s1JDGdSe2>vXsbcje%e zNU#m1%y#9kf<2WN89pn9L;4#9IWpPwMmx4@x)!#d)0G)^ zn*HyJ>19@G2+aHIo(Mx$LQW=5CA57W%9D%kowQqD7G2NAD(+@83KV|$0-ceS9IZ>! z&*Wul<&d4M={oh-Ghq)@#Sm|d0d|wakptb_l|Ss-upxwWtETb9FNA*+{gV8Qm(AdN zwb9<8up~oK=MbeAKn{!3F0~D{;gC;C>KDEaNd}*E*zZyx#g|Iu;5}0eAQ~Au{Fyul z+K?Q_=$ofkR#wn=;|ad%;s5)6(vt;F5hl8zWZcwl7mG4jYJk%QMsAGqAXNOCkKaV-OTG=Fd}u~Bw|iHG!(xp}RW8#~L^;ien(#z2jJzO5@JNg{&p+;rkab9j zM(^6r7!2khWDd-Bsj+nWYx0~MPh2B0y3-IW<*!zS2$>vCw|<5=06yX;gF_$|$OM>H zCDjl)x278c`c#GdKB_ASUd#2MG$X1Eo;a9H-0V7TN!i?bk$DBIIzm1us-ZEip*x(> z6&7GdTq`}pNR%pt8x+#h{KeDUMkJDET*`bGU51=O9x(!c? 
zta|h{M8U*$kcN!2D#z}ud@Uu=^TC_$4`R>r8wSsSA{ z>{1yKBw0YYW>UGIdWPD3gIi%L;g6b-SiIM59GSbXKGIZfw`ZU%aCEE{Pt~3XAgR0u zEaNkv@;NBN_5sT*ur_O5+C(4(b=EF`#QrSvfA(&z``k>d15W+xRh<9&Z~Wjw*7|Ws@Y4qX~)thx@DxeN)4rgG^_nlcnm5vbfY!WG~fISdMB`hH=G2P281rlsglO{8uCW;QkM^J^P%&HgS|5 z;vkEZ6IoyKag7gpb@`1n%3vLm9qb%@E14>+IUy#u@nn~OFrrQEgWOaaAX9WvS3O`e z0{b}!EjUR9k;%q<)PdwS47Ib3t!DF6kSvJ92B zrUB3(?AjF8yH810!Wat@EGQrw%8wO(JXtNcm23_H&kTKUzt}J|Vo?M$R4P%-<_~*; zDtNv51St-J6VR%A{d`jGe7B(Cyk^_Hee>jXT|w?G;tY2c^_W;eqH>B0fnU3$)b;Q^ z4H`Q_>o1L{?Yc*0kd+!2e zfS~aS40foW=ZRj5F0dlc35#acvQWELyCYPk;O16UWm6;BG^=GfG@gD|_;Zt7*u>Rr zCR~$knft4V(5!UEjW1%ca7`U_Yzy;-i$nXLNBA}28RlsuXu0I8zT!Yvn^_>I0>(0q zkWxWd2dNLxe(*HJ+12}awF@}i+d!prryx#X=dAYgG zzW*J0)dsJ%uoU6@dcW1tYP`7IMA7&{)(f@#%*^&Xc%i!I`Xjsn1!N)CN=x@DhbaJu zK*j9f9=&eBRM;5?+j5*yy?q6+kD)ZO!@-i23UEc6)?x)S4XxKEEBbz)F>{1=CanhL zhKaN1Il$YZCbr+*TqTM_POO{1K$lqODCa_mE9g`7)gHsyHNR!C&c`X$cr5V+0#iFp zpKSVv0tfb*fKph4f_7E4a+RX2KEx2b(ieWQd*WG6r*xwxbM{Z@p0NTX21lA%hdB{s z&ywwhVxhQ@WBBhNF--_atu~bIz`XqXnrOTYf4{p515_A@meabi&;Ietm$&g8!5jd< zQh|y>!e^QC!tbHIPRGZ@k)acOb5vJEdza^)V?t-sy0;w^8mm|U) ze~uDPsoBv;m@yW#W`s?{9^iE=1dohoF)hJC(QqZY=8W+*fZwl5PK|r*o@R4KU!vbI zk3C^HMoV)U1RgDmI)Z8NUY?^NgUv>Zs;i`Wg+#ieU%&6#gQnFBo`V8+3u-{tV}J`0 zKDlfZuDH}%EL}=HpvHt27krC$xRfW|aAf9jWYrl(S+Gd*8m2qk9_XxIcN(J)dsa(e z?Ar%Zh1zHe$vTXv0v2sq7g~H)BsDVTj6;M&9bPDCX{C+4!`#O}*A}sP zD{e}4ByROiaQ@6ljVC;%0fQ4M|C19%S{{Zu_sab2NZ*7lVDA}=XXO-!i@U}`lX{L= z9>Fl4NV>N}RX4xk=b)`TKWMxxf%FgV@5a- zq#41q`K$mPkT%%xmA5%(OE^3$G!2}4SzlEHQVlh4M<``%WGr>hQ8RnA*e$VIU@j6h z4g~HH3MZwd*Cq?udK07Sm!arp=0VlqKs#P!o0D*yF&>XbCmM-)&*`>XmusQqDdblU zM{vuvFXk61G-AGMBB*+_BT}m3c`Q>C=|f-a>Ff=4yn}lwAWJz?sX>g6ZJHuI^>Pq; zmd=0-f=IkjN8zaLkNLZ}aJd8~ZWd@ui(O$4ep%7Iut~du>3mtD9)~s1EuQUNv5pgBJa+WagS?HmhPm4acfmS&P zQRDg0As(~}^3Oc$HJ=Mc&5aauYA59L%VMnkcE+?(A|*JlB-%uy(6h9Yk0<-@Y<~-CsELAcRBqEy5W* zmxlTiKC67Fbh#IvZ0Pn&s#TM-XkcF60YzoQL^jU9R`r3x zf|D$>PRp;va?4##K&Q!YAY0}8$%+#?9=xM85`k`bI9@}4AaiUCLbPBpW7HZFMzS7B zyZ+-lUghC!UbPqDV)tB$+fT&}I{T&?1j6cc6m-2XXhM7iJSs4U4qJ+XEcy|t>B58l z@HwLv%bQrk*)1RWTf5x0JMg+4`j%5XaRDkxq>%%e0-4L!)A?HyX4zeq;{@=0-kiz6 zx+|BV?U(8Xkjq^YeuOcN1lY#odp6K^$rgw(0%%r$n4mb!vs4UbbcuS_^68sHray%tRPP<05h*KR7$+l{vjKs#uf*cxJTA{K~?KY&WI6sVCz$QCj;uZK^y_}SU3MeXKLe?}r6g0H|Dv2{K(27pv!nSm7 zrkblrFD*894wrm4au`?|*rP_pA}yo-HlxaIkTv!N4|W^X+YUNINSSGSwGj95?kPgQSY)@UdRCqa}KckZM z7({%q3dd*c-hhBy(+tr(o1Nn+1kMeCz$;BH-e>3iS)e}XXZm0r`^hB~r!g`oTwIvd zG%E9x!mAWW*N(0fR{Y+cdbnRnf_Q~?&mL)FKW*J0gfX$3V%k5`)oBqvB%uC}SV7&_ zIGY?85Rp&4?p+=Q*4#HMktpbwV6!Vy#}b8$oovHqQ8%x}81=0N^gC8KPh9+u$F50r zEL+5@R7m5Y=Yu(4;M5JOEbFUj?W(J3)QaCfB~pd&k~Vf)YQ^1#1A`m^WvrY6Xx&GY z24{O>!=rm&%g5C0u#2E+i-rDS7!k)*4BVNa?}YtAsSs6xmX%I+>#Cdt-{Ll*6dsGY z%t=q07U_(iSo{gsTP*NP_4oJaX}m~fj5Shq=NS+j$M@tkX$-7#P7`Lb^X!RU&U6K4IF@JO5Zge{pn-ewnCVC( zwPSfu9KEh*AG2O%g5e$tUZMvzwP)KRBCv05zrXll08z|2ypt`17FYMHZl1V)a=QX* z#fZ*kRBfJZ>h!Tgg#&a5p0|c99lUz?R~~2&xZa~s4=QuI@r4ho{<&nh62BBtkJR`f&FfAC<|nD<+A=H>tMY+Xhwq7FVLV4>DG`O301;sa#eoPu*lp2 zzda@EmC8&_e9`f(n`&uHOX!UawP6$@^UoUEc(wGTFk@VH_HlQE&v!Wn2gH*w^XT$3 z1dr|_S><97fzuhe(^wK}4W@ZdTon^=0_+m`hK{w)MCJYTHeX7>=Jtyz&y7kulOxRJ z=lhPcY`5W04K3qSYyYhP1qB@Qj}-}EvWQ{k`$OGHX5}rY|6FXYBlJv^AodnhSO=A- zGljnP8_rvad^sf|(QeF%SSVywc&%7X2u!n)OWd2ia+9xI3_rds;BF7kG7~99;+R7B z4DA$ekrVSwA31E+q|$_nV~D-Bl0(Q*`S);uj0q}KE+-b!WQQ@>7)C&a7NXbM9=#E0 zNVcasQqF=K1Nm-i948ZZV`M==SRBPiKUzB}XRKDTBJ6C~c2h>5DN#|MK(caKBHg9rf+kn{*v0`38Kv->xstP4L*r|oLP7k*ew&2ml}NQ~ z36pG;8GbV=d{d$cD+&=Fyk6~%>a8Y-IoA@)zyghv`5hMoz9kGfwSzT*R{1u8)9vKa zi{ElUK6P~UcoE&OcvknvDYi7pxC_`_&E*BDrI?!M<(n-Vv@wDD>AxNHA&QKd8(-srQX#2tgAk!-Uh+6#se6{0JC5x|ki&U!sdxQ-U>gSQFZHc8pqHozSr 
[GIT binary patch data omitted — the base85-encoded payload of the binary files is not human-readable]

diff --git a/docs/subnetlaplace.html b/docs/subnetlaplace.html
new file mode 100644
index 00000000..31d93975
--- /dev/null
+++ b/docs/subnetlaplace.html
@@ -0,0 +1,171 @@
laplace.subnetlaplace API documentation
      Module laplace.subnetlaplace

      Classes

class SubnetLaplace(model, likelihood, subnetwork_indices, sigma_noise=1.0, prior_precision=1.0, prior_mean=0.0, temperature=1.0, backend=laplace.curvature.backpack.BackPackGGN, backend_kwargs=None)

      Class for subnetwork Laplace, which computes the Laplace approximation over +just a subset of the model parameters (i.e. a subnetwork within the neural network), +as proposed in [1]. Subnetwork Laplace only supports a full Hessian approximation; other +approximations could be used in theory, but would not make as much sense conceptually.


A Laplace approximation is represented by a MAP estimate, given by the model parameters, and a posterior precision or covariance specifying a Gaussian distribution \(\mathcal{N}(\theta_{MAP}, P^{-1})\). Here, only a subset of the model parameters (i.e. a subnetwork of the neural network) is treated probabilistically. The goal of this class is to compute the posterior precision \(P\), which is given by the sum

\[
P = \sum_{n=1}^N \nabla^2_\theta \log p(\mathcal{D}_n \mid \theta) \big\vert_{\theta_{MAP}} + \nabla^2_\theta \log p(\theta) \big\vert_{\theta_{MAP}}.
\]

The prior is assumed to be Gaussian and therefore we have a simple form for \(\nabla^2_\theta \log p(\theta) \vert_{\theta_{MAP}} = P_0\). In particular, we assume a scalar or diagonal prior precision so that in all cases \(P_0 = \textrm{diag}(p_0)\), and the structure of \(p_0\) can be varied.


The subnetwork Laplace approximation only supports a full, i.e. dense, log likelihood Hessian approximation and hence posterior precision. Based on the chosen backend parameter, the full approximation can be, for example, a generalized Gauss-Newton matrix. Mathematically, we have \(P \in \mathbb{R}^{P \times P}\). See FullLaplace and BaseLaplace for the full interface.


      References


      [1] Daxberger, E., Nalisnick, E., Allingham, JU., Antorán, J., Hernández-Lobato, JM. +Bayesian Deep Learning via Subnetwork Inference. +ICML 2021.


      Parameters

model : torch.nn.Module or FeatureExtractor
likelihood : {'classification', 'regression'}
    determines the log likelihood Hessian approximation
subnetwork_indices : torch.LongTensor
    indices of the vectorized model parameters (i.e. torch.nn.utils.parameters_to_vector(model.parameters())) that define the subnetwork to apply the Laplace approximation over
sigma_noise : torch.Tensor or float, default=1
    observation noise for the regression setting; must be 1 for classification
prior_precision : torch.Tensor or float, default=1
    prior precision of a Gaussian prior (= weight decay); can be scalar, per-layer, or diagonal in the most general case
prior_mean : torch.Tensor or float, default=0
    prior mean of a Gaussian prior, useful for continual learning
temperature : float, default=1
    temperature of the likelihood; lower temperature leads to a more concentrated posterior and vice versa.
backend : subclasses of CurvatureInterface
    backend for access to curvature/Hessian approximations
backend_kwargs : dict, default=None
    arguments passed to the backend on initialization, for example to set the number of MC samples for stochastic approximations.
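For orientation, a minimal usage sketch (hedged: `model` and `train_loader` are assumed to be a trained torch.nn.Module and a torch.utils.data.DataLoader; the choice of the first ten parameter indices is purely illustrative):

    import torch
    from laplace.subnetlaplace import SubnetLaplace

    # Illustrative subnetwork: the first ten entries of the vectorized
    # model parameters (torch.arange yields int64, i.e. a LongTensor).
    subnetwork_indices = torch.arange(10)

    la = SubnetLaplace(model, 'classification',
                       subnetwork_indices=subnetwork_indices)
    la.fit(train_loader)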

      Ancestors

• laplace.baselaplace.FullLaplace
• laplace.baselaplace.BaseLaplace

      Instance variables

var prior_precision_diag

Obtain the diagonal prior precision \(p_0\) constructed from either a scalar or diagonal prior precision.


      Returns

prior_precision_diag : torch.Tensor
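Conceptually, the property amounts to something like the following sketch (a hypothetical stand-alone function, not the library's verbatim code; n_params_subnet denotes the subnetwork size):

    import torch

    # Hypothetical stand-alone sketch of the property's logic.
    def prior_precision_diag(prior_precision, n_params_subnet):
        if prior_precision.numel() == 1:                # scalar prior precision
            return prior_precision * torch.ones(n_params_subnet)
        if prior_precision.numel() == n_params_subnet:  # already diagonal
            return prior_precision
        raise ValueError('Mismatch of prior and subnetwork dimensions.')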

\ No newline at end of file
diff --git a/docs/feature_extractor.html b/docs/utils/feature_extractor.html
similarity index 84%
rename from docs/feature_extractor.html
rename to docs/utils/feature_extractor.html
index 1c7ef071..9599128b 100644
--- a/docs/feature_extractor.html
+++ b/docs/utils/feature_extractor.html
@@ -4,7 +4,7 @@
-laplace.feature_extractor API documentation
+laplace.utils.feature_extractor API documentation
@@ -20,7 +20,7 @@
-Module laplace.feature_extractor
+Module laplace.utils.feature_extractor
-Module laplace.feature_extractor
+Module laplace.utils.feature_extractor

Classes

class FeatureExtractor (model: torch.nn.modules.module.Module, last_layer_name: Optional[str] = None)

Ancestors

Class variables

var dump_patches : bool
var training : bool

Methods

def forward(self, x: torch.Tensor) ‑> torch.Tensor

Parameters

one batch of data to use as input for the forward pass

def forward_with_features(self, x: torch.Tensor) ‑> Tuple[torch.Tensor, torch.Tensor]

Parameters

one batch of data to use as input for the forward pass

def set_last_layer(self, last_layer_name: str) ‑> None

Parameters

the name of the last layer (fixed in model.named_modules()).

def find_last_layer(self, x: torch.Tensor) ‑> torch.Tensor

Index

• Super-module
• Classes
diff --git a/docs/utils/index.html b/docs/utils/index.html
new file mode 100644
index 00000000..2848898a
--- /dev/null
+++ b/docs/utils/index.html
@@ -0,0 +1,1017 @@
laplace.utils API documentation
      Module laplace.utils

      Sub-modules

• laplace.utils.feature_extractor
• laplace.utils.matrix
• laplace.utils.subnetmask
• laplace.utils.swag
• laplace.utils.utils

      Functions

def get_nll(out_dist, targets)

def validate(laplace, val_loader, pred_type='glm', link_approx='probit', n_samples=100)

def parameters_per_layer(model)

Get number of parameters per layer.

Parameters

model : torch.nn.Module

Returns

params_per_layer : list[int]
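A roughly equivalent sketch (a hypothetical helper; the real function may differ in how it groups parameters):

    import torch

    def parameters_per_layer_sketch(model):
        # Each parameter tensor (weights and biases separately) is one entry,
        # matching the order of model.parameters().
        return [p.numel() for p in model.parameters()]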
def invsqrt_precision(M)

Compute \(M^{-0.5}\) as a tridiagonal matrix.

Parameters

M : torch.Tensor

Returns

M_invsqrt : torch.Tensor
def kron(t1, t2)

Computes the Kronecker product between two tensors.

Parameters

t1 : torch.Tensor
t2 : torch.Tensor

Returns

kron_product : torch.Tensor
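For two dense 2-D factors this should agree with torch.kron (assuming kron is importable from laplace.utils, as listed above):

    import torch
    from laplace.utils import kron

    Q, H = torch.randn(3, 3), torch.randn(4, 4)
    # Kronecker product of a (3, 3) and a (4, 4) matrix has shape (12, 12).
    assert torch.allclose(kron(Q, H), torch.kron(Q, H))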
def diagonal_add_scalar(X, value)

Add the scalar value to the diagonal of X.

Parameters

X : torch.Tensor
value : torch.Tensor or float

Returns

X_add_scalar : torch.Tensor
def symeig(M)

Symmetric eigendecomposition, avoiding failure cases by adding and removing jitter to the diagonal.

Parameters

M : torch.Tensor

Returns

L : torch.Tensor
    eigenvalues
W : torch.Tensor
    eigenvectors
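The jitter trick can be sketched as follows (a hypothetical helper built on torch.linalg.eigh, not the library's exact implementation):

    import torch

    def symeig_with_jitter(M, jitter=1e-6):
        try:
            return torch.linalg.eigh(M)  # (eigenvalues, eigenvectors)
        except RuntimeError:
            # Decompose M + jitter * I instead, then subtract the jitter from
            # the eigenvalues so the result still corresponds to M.
            eye = torch.eye(M.shape[0], dtype=M.dtype, device=M.device)
            L, W = torch.linalg.eigh(M + jitter * eye)
            return L - jitter, W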
def block_diag(blocks)

Compose a block-diagonal matrix of individual blocks.

Parameters

blocks : list[torch.Tensor]

Returns

M : torch.Tensor
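For example (assuming block_diag is importable from laplace.utils, as listed above):

    import torch
    from laplace.utils import block_diag

    blocks = [torch.eye(2), 3.0 * torch.eye(1)]
    M = block_diag(blocks)  # (3, 3) matrix with the blocks on its diagonal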
def expand_prior_precision(prior_prec, model)

Expand the prior precision to match the shape of the model parameters.

Parameters

prior_prec : torch.Tensor, 1-dimensional
    prior precision
model : torch.nn.Module
    torch model with parameters that are regularized by prior_prec

Returns

expanded_prior_prec : torch.Tensor
    expanded prior precision with the same shape as the model parameters
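The expansion logic can be sketched as follows (a hypothetical stand-alone version, not the library's verbatim code):

    import torch

    def expand_prior_precision_sketch(prior_prec, model):
        params = list(model.parameters())
        n_params = sum(p.numel() for p in params)
        if prior_prec.numel() == 1:            # scalar: same precision everywhere
            return prior_prec * torch.ones(n_params)
        if prior_prec.numel() == len(params):  # per-layer: repeat per entry
            return torch.cat([pp * torch.ones(p.numel())
                              for pp, p in zip(prior_prec, params)])
        return prior_prec                      # already diagonal (length n_params)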
def fit_diagonal_swag_var(model, train_loader, criterion, n_snapshots_total=40, snapshot_freq=1, lr=0.01, momentum=0.9, weight_decay=0.0003, min_var=1e-30)

Fit diagonal SWAG [1], which estimates the marginal variances of model parameters by computing the first and second moments of SGD iterates with a large learning rate.

Implementation partly adapted from:
- https://github.com/wjmaddox/swa_gaussian/blob/master/swag/posteriors/swag.py
- https://github.com/wjmaddox/swa_gaussian/blob/master/experiments/train/run_swag.py

References

[1] Maddox, W., Garipov, T., Izmailov, P., Vetrov, D., Wilson, AG.
A Simple Baseline for Bayesian Uncertainty in Deep Learning.
NeurIPS 2019.

Parameters

model : torch.nn.Module
train_loader : torch.utils.data.DataLoader
    training data loader to use for snapshot collection
criterion : torch.nn.CrossEntropyLoss or torch.nn.MSELoss
    loss function to use for snapshot collection
n_snapshots_total : int
    total number of model snapshots to collect
snapshot_freq : int
    snapshot collection frequency (in epochs)
lr : float
    SGD learning rate for collecting snapshots
momentum : float
    SGD momentum
weight_decay : float
    SGD weight decay
min_var : float
    minimum parameter variance to clamp to (for numerical stability)

Returns

param_variances : torch.Tensor
    vector of marginal variances for each model parameter
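A minimal call might look like this (model and train_loader are assumed; classification setting; fit_diagonal_swag_var is assumed importable from laplace.utils as listed above):

    import torch
    from laplace.utils import fit_diagonal_swag_var

    criterion = torch.nn.CrossEntropyLoss()
    param_variances = fit_diagonal_swag_var(model, train_loader, criterion,
                                            n_snapshots_total=40,
                                            snapshot_freq=1, lr=0.01)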

      Classes

class FeatureExtractor(model: torch.nn.modules.module.Module, last_layer_name: Optional[str] = None)

Feature extractor for a PyTorch neural network. A wrapper which can return the output of the penultimate layer in addition to the output of the last layer for each forward pass. If the name of the last layer is not known, it can determine it automatically. It assumes that the last layer is linear and that for every forward pass the last layer is the same. If the name of the last layer is known, it can be passed as a parameter at initialization; this is the safest way to use this class.
Based on https://gist.github.com/fkodom/27ed045c9051a39102e8bcf4ce31df76.

Parameters

model : torch.nn.Module
    PyTorch model
last_layer_name : str, default=None
    if the name of the last layer is already known, otherwise it will be determined automatically.

Ancestors

• torch.nn.modules.module.Module

Class variables

var dump_patches : bool
var training : bool

Methods

def forward(self, x: torch.Tensor) ‑> torch.Tensor

Forward pass. If the last layer is not known yet, it will be determined when this function is called for the first time.

Parameters

x : torch.Tensor
    one batch of data to use as input for the forward pass

def forward_with_features(self, x: torch.Tensor) ‑> Tuple[torch.Tensor, torch.Tensor]

Forward pass which returns the output of the penultimate layer along with the output of the last layer. If the last layer is not known yet, it will be determined when this function is called for the first time.

Parameters

x : torch.Tensor
    one batch of data to use as input for the forward pass

def set_last_layer(self, last_layer_name: str) ‑> None

Set the last layer of the model by its name. This sets the forward hook to get the output of the penultimate layer.

Parameters

last_layer_name : str
    the name of the last layer (fixed in model.named_modules()).

def find_last_layer(self, x: torch.Tensor) ‑> torch.Tensor

Automatically determines the last layer of the model with one forward pass. It assumes that the last layer is the same for every forward pass and that it is an instance of torch.nn.Linear. Might not work with every architecture, but is tested with all PyTorch torchvision classification models (besides SqueezeNet, which has no linear last layer).

Parameters

x : torch.Tensor
    one batch of data to use as input for the forward pass
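A short usage sketch (model is assumed to be an image classifier with a linear last layer; the input shape is illustrative):

    import torch
    from laplace.utils import FeatureExtractor

    feature_extractor = FeatureExtractor(model)  # last layer found automatically
    x = torch.randn(8, 3, 32, 32)                # one hypothetical input batch
    out, features = feature_extractor.forward_with_features(x)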
class Kron(kfacs)

Kronecker factored approximate curvature representation for a corresponding neural network. Each element in kfacs is either a tuple or a single matrix. A tuple represents two Kronecker factors Q and H, and a single element is just a full block Hessian approximation.

Parameters

kfacs : list[Tuple]
    each element in the list is a Tuple of two Kronecker factors Q, H or a single matrix approximating the Hessian (in case of bias, for example)

Static methods

def init_from_model(model, device)

Initialize Kronecker factors based on a model's architecture.

Parameters

model : torch.nn.Module
device : torch.device

Returns

kron : Kron

Methods

def decompose(self, damping=False)

Eigendecompose Kronecker factors and turn into KronDecomposed.

Parameters

damping : bool
    use damping

Returns

kron_decomposed : KronDecomposed

def bmm(self, W: torch.Tensor, exponent: float = 1) ‑> torch.Tensor

Batched matrix multiplication with the Kronecker factors. If Kron is H, we compute H @ W. This is useful for computing the predictive or a regularization based on Kronecker factors as in continual learning.

Parameters

W : torch.Tensor
    matrix (batch, classes, params)
exponent : float, default=1
    can only be 1 for Kron; other exponent values of the Kronecker factors require KronDecomposed.

Returns

SW : torch.Tensor
    result (batch, classes, params)

def logdet(self) ‑> torch.Tensor

Compute the log determinant of the Kronecker factors and sum them up. This corresponds to the log determinant of the entire Hessian approximation.

Returns

logdet : torch.Tensor

def diag(self) ‑> torch.Tensor

Extract the diagonal of the entire Kronecker factorization.

Returns

diag : torch.Tensor

def to_matrix(self) ‑> torch.Tensor

Make the Kronecker factorization dense by computing the Kronecker product. Warning: this should only be used for testing purposes, as it will allocate large amounts of memory for big architectures.

Returns

block_diag : torch.Tensor
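To illustrate the shapes involved in Kron.bmm, a small sketch (the zero-initialized factors from init_from_model are just for demonstration; this is not a meaningful curvature estimate):

    import torch
    from laplace.utils import Kron

    tiny = torch.nn.Linear(2, 3)   # two parameter groups: weight and bias
    kron_H = Kron.init_from_model(tiny, torch.device('cpu'))
    n_params = sum(p.numel() for p in tiny.parameters())  # 9
    W = torch.randn(4, 3, n_params)  # (batch, classes, params)
    SW = kron_H.bmm(W)               # H @ W, shape (4, 3, 9)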
class KronDecomposed(eigenvectors, eigenvalues, deltas=None, damping=False)

Decomposed Kronecker factored approximate curvature representation for a corresponding neural network. Each matrix in Kron is decomposed to obtain KronDecomposed. Front-loading the decomposition allows cheap repeated computation of inverses and log determinants. In contrast to Kron, we can add scalar or layerwise scalars, but we cannot add other Kron or KronDecomposed anymore.

Parameters

eigenvectors : list[Tuple[torch.Tensor]]
    eigenvectors corresponding to matrices in a corresponding Kron
eigenvalues : list[Tuple[torch.Tensor]]
    eigenvalues corresponding to matrices in a corresponding Kron
deltas : torch.Tensor
    addend for each group of Kronecker factors representing, for example, a prior precision
damping : bool, default=False
    use the damping approximation, mixing prior and Kron partially multiplicatively

Methods

def detach(self)

def logdet(self) ‑> torch.Tensor

Compute the log determinant of the Kronecker factors and sum them up. This corresponds to the log determinant of the entire Hessian approximation. In contrast to Kron.logdet(), additive deltas corresponding to prior precisions are added.

Returns

logdet : torch.Tensor

def inv_square_form(self, W: torch.Tensor) ‑> torch.Tensor

def bmm(self, W: torch.Tensor, exponent: float = -1) ‑> torch.Tensor

Batched matrix multiplication with the decomposed Kronecker factors. This is useful for computing the predictive or a regularization loss. Compared to Kron.bmm(), a prior can be added here in the form of deltas and the exponent can be other than just 1. Computes \(H^{exponent} W\).

Parameters

W : torch.Tensor
    matrix (batch, classes, params)
exponent : float, default=-1

Returns

SW : torch.Tensor
    result (batch, classes, params)

def to_matrix(self, exponent: float = 1) ‑> torch.Tensor

Make the Kronecker factorization dense by computing the Kronecker product. Warning: this should only be used for testing purposes, as it will allocate large amounts of memory for big architectures.

Returns

block_diag : torch.Tensor
class SubnetMask(model)

Base class for all subnetwork masks in this library (for subnetwork Laplace).

Parameters

model : torch.nn.Module

Subclasses

• laplace.utils.subnetmask.ScoreBasedSubnetMask
• ParamNameSubnetMask
• ModuleNameSubnetMask

Instance variables

var indices
var n_params_subnet

Methods

def convert_subnet_mask_to_indices(self, subnet_mask)

Converts a subnetwork mask into subnetwork indices.

Parameters

subnet_mask : torch.Tensor
    a binary vector of size (n_params) where 1s locate the subnetwork parameters within the vectorized model parameters (i.e. torch.nn.utils.parameters_to_vector(model.parameters()))

Returns

subnet_mask_indices : torch.LongTensor
    a vector of indices of the vectorized model parameters (i.e. torch.nn.utils.parameters_to_vector(model.parameters())) that define the subnetwork

def select(self, train_loader=None)

Select the subnetwork mask.

Parameters

train_loader : torch.utils.data.DataLoader, default=None
    each iterate is a training batch (X, y); train_loader.dataset needs to be set to access N, the size of the data set

Returns

subnet_mask_indices : torch.LongTensor
    a vector of indices of the vectorized model parameters (i.e. torch.nn.utils.parameters_to_vector(model.parameters())) that define the subnetwork

def get_subnet_mask(self, train_loader)

Get the subnetwork mask.

Parameters

train_loader : torch.utils.data.DataLoader
    each iterate is a training batch (X, y); train_loader.dataset needs to be set to access N, the size of the data set

Returns

subnet_mask : torch.Tensor
    a binary vector of size (n_params) where 1s locate the subnetwork parameters within the vectorized model parameters (i.e. torch.nn.utils.parameters_to_vector(model.parameters()))
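Tying the mask classes to SubnetLaplace, a hedged end-to-end sketch (model and train_loader assumed; imports assume the classes are exposed under laplace.utils as documented here):

    from laplace.subnetlaplace import SubnetLaplace
    from laplace.utils import LargestMagnitudeSubnetMask

    subnet_mask = LargestMagnitudeSubnetMask(model, n_params_subnet=128)
    subnetwork_indices = subnet_mask.select()  # no data needed for this mask
    la = SubnetLaplace(model, 'classification',
                       subnetwork_indices=subnetwork_indices)
    la.fit(train_loader)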
class RandomSubnetMask(model, n_params_subnet)

Subnetwork mask of parameters sampled uniformly at random.

Ancestors

• laplace.utils.subnetmask.ScoreBasedSubnetMask
• SubnetMask

Methods

def compute_param_scores(self, train_loader)
class LargestMagnitudeSubnetMask(model, n_params_subnet)

Subnetwork mask identifying the parameters with the largest magnitude.

Ancestors

• laplace.utils.subnetmask.ScoreBasedSubnetMask
• SubnetMask

Methods

def compute_param_scores(self, train_loader)
class LargestVarianceDiagLaplaceSubnetMask(model, n_params_subnet, diag_laplace_model)

Subnetwork mask identifying the parameters with the largest marginal variances (estimated using a diagonal Laplace approximation over all model parameters).

Parameters

model : torch.nn.Module
n_params_subnet : int
    number of parameters in the subnetwork (i.e. number of top-scoring parameters to select)
diag_laplace_model : DiagLaplace
    diagonal Laplace model to use for variance estimation

Ancestors

• laplace.utils.subnetmask.ScoreBasedSubnetMask
• SubnetMask

Methods

def compute_param_scores(self, train_loader)
class LargestVarianceSWAGSubnetMask(model, n_params_subnet, likelihood='classification', swag_n_snapshots=40, swag_snapshot_freq=1, swag_lr=0.01)

Subnetwork mask identifying the parameters with the largest marginal variances (estimated using diagonal SWAG over all model parameters).

Parameters

model : torch.nn.Module
n_params_subnet : int
    number of parameters in the subnetwork (i.e. number of top-scoring parameters to select)
likelihood : str
    'classification' or 'regression'
swag_n_snapshots : int
    number of model snapshots to collect for SWAG
swag_snapshot_freq : int
    SWAG snapshot collection frequency (in epochs)
swag_lr : float
    learning rate for SWAG snapshot collection

Ancestors

• laplace.utils.subnetmask.ScoreBasedSubnetMask
• SubnetMask

Methods

def compute_param_scores(self, train_loader)
class ParamNameSubnetMask(model, parameter_names)

Subnetwork mask corresponding to the specified parameters of the neural network.

Parameters

model : torch.nn.Module
parameter_names : List[str]
    list of names of the parameters (as in model.named_parameters()) that define the subnetwork

Ancestors

• SubnetMask

Methods

def get_subnet_mask(self, train_loader)

Get the subnetwork mask identifying the specified parameters.
class ModuleNameSubnetMask(model, module_names)

Subnetwork mask corresponding to the specified modules of the neural network.

Parameters

model : torch.nn.Module
module_names : List[str]
    list of names of the modules (as in model.named_modules()) that define the subnetwork; the modules cannot have children, i.e. they need to be leaf modules

Ancestors

• SubnetMask

Subclasses

• LastLayerSubnetMask

Methods

def get_subnet_mask(self, train_loader)

Get the subnetwork mask identifying the specified modules.
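For example (hypothetical module names; the names must match model.named_modules() and refer to leaf modules):

    from laplace.utils import ModuleNameSubnetMask

    subnet_mask = ModuleNameSubnetMask(model, module_names=['conv1', 'fc'])
    subnetwork_indices = subnet_mask.select()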
class LastLayerSubnetMask(model, last_layer_name=None)

Subnetwork mask corresponding to the last layer of the neural network.

Parameters

model : torch.nn.Module
last_layer_name : str, default=None
    name of the model's last layer; if None, it will be determined automatically

Ancestors

• ModuleNameSubnetMask
• SubnetMask

Methods

def get_subnet_mask(self, train_loader)

Get the subnetwork mask identifying the last layer.
\ No newline at end of file
diff --git a/docs/matrix.html b/docs/utils/matrix.html
similarity index 75%
rename from docs/matrix.html
rename to docs/utils/matrix.html
index 6323f7cf..caa3e688 100644
--- a/docs/matrix.html
+++ b/docs/utils/matrix.html
@@ -4,7 +4,7 @@
-laplace.matrix API documentation
+laplace.utils.matrix API documentation
@@ -20,7 +20,7 @@
      -

      Module laplace.matrix

      +

      Module laplace.utils.matrix

      @@ -33,7 +33,7 @@

      Module laplace.matrix

      Classes

      -
      +
      class Kron (kfacs)
      @@ -51,7 +51,7 @@

      Parameters

      Static methods

      -
      +
      def init_from_model(model, device)
      @@ -65,18 +65,18 @@

      Parameters

      Returns

      -
      kron : Kron
      +
      kron : Kron
       

    Methods

    -
    +
    def decompose(self, damping=False)
    -

    Eigendecompose Kronecker factors and turn into KronDecomposed. +

    Eigendecompose Kronecker factors and turn into KronDecomposed. Parameters


    @@ -85,11 +85,11 @@

    Methods

    Returns

    -
    kron_decomposed : KronDecomposed
    +
    kron_decomposed : KronDecomposed
     
    -
    +
    def bmm(self, W: torch.Tensor, exponent: float = 1) ‑> torch.Tensor
    @@ -102,7 +102,7 @@

    Parameters

    W : torch.Tensor
    matrix (batch, classes, params)
    exponent : float, default=1
    -
    only can be 1 for Kron, requires KronDecomposed for other +
    only can be 1 for Kron, requires KronDecomposed for other exponent values of the Kronecker factors.

    Returns

    @@ -111,7 +111,7 @@

    Returns

    result (batch, classes, params)
    -
    +
    def logdet(self) ‑> torch.Tensor
    @@ -123,7 +123,7 @@

    Returns

     
    -
    +
    def diag(self) ‑> torch.Tensor
    @@ -134,7 +134,7 @@

    Returns

     
    -
    +
    def to_matrix(self) ‑> torch.Tensor
    @@ -149,24 +149,24 @@

    Returns

    -
    +
    class KronDecomposed (eigenvectors, eigenvalues, deltas=None, damping=False)

    Decomposed Kronecker factored approximate curvature representation for a corresponding neural network. -Each matrix in Kron is decomposed to obtain KronDecomposed. +Each matrix in Kron is decomposed to obtain KronDecomposed. Front-loading decomposition allows cheap repeated computation of inverses and log determinants. -In contrast to Kron, we can add scalar or layerwise scalars but -we cannot add other Kron or KronDecomposed anymore.

    +In contrast to Kron, we can add scalar or layerwise scalars but +we cannot add other Kron or KronDecomposed anymore.

    Parameters

    eigenvectors : list[Tuple[torch.Tensor]]
    -
    eigenvectors corresponding to matrices in a corresponding Kron
    +
    eigenvectors corresponding to matrices in a corresponding Kron
    eigenvalues : list[Tuple[torch.Tensor]]
    -
    eigenvalues corresponding to matrices in a corresponding Kron
    +
    eigenvalues corresponding to matrices in a corresponding Kron
    deltas : torch.Tensor
    addend for each group of Kronecker factors representing, for example, a prior precision
    @@ -175,19 +175,19 @@

    Parameters

    Methods

    -
    +
    def detach(self)
    -
    +
    def logdet(self) ‑> torch.Tensor

    Compute log determinant of the Kronecker factors and sums them up. This corresponds to the log determinant of the entire Hessian approximation. -In contrast to Kron.logdet(), additive deltas corresponding to prior +In contrast to Kron.logdet(), additive deltas corresponding to prior precisions are added.

    Returns

    @@ -195,19 +195,19 @@

    Returns

     
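The deltas make this cheap because the eigenvalues of A ⊗ B + δI are exactly λᵢᴬλⱼᴮ + δ. A hedged numerical check (illustrative code, not the library's implementation):

```python
import torch

A = torch.randn(3, 3); A = A @ A.T   # illustrative PSD Kronecker factors
B = torch.randn(4, 4); B = B @ B.T
delta = 0.5                          # additive prior precision

la, lb = torch.linalg.eigvalsh(A), torch.linalg.eigvalsh(B)
cheap = torch.log(torch.outer(la, lb) + delta).sum()   # O(nm) double sum
exact = torch.logdet(torch.kron(A, B) + delta * torch.eye(12))
assert torch.allclose(cheap, exact, atol=1e-4)
```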
    -
    +
    def inv_square_form(self, W: torch.Tensor) ‑> torch.Tensor
    -
    +
    def bmm(self, W: torch.Tensor, exponent: float = -1) ‑> torch.Tensor

Batched matrix multiplication with the decomposed Kronecker factors.
This is useful for computing the predictive or a regularization loss.
Compared to Kron.bmm(), a prior can be added here in the form of deltas,
and the exponent can differ from 1.
Computes H^{exponent} W.

    Parameters

    @@ -223,7 +223,7 @@

    Returns

    result (batch, classes, params)
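A hedged sketch of why the decomposition buys arbitrary exponents: with H + δI = Q diag(λ + δ) Qᵀ, any power is just a reweighting of eigenvalues (single dense block for illustration; the library applies this per Kronecker factor):

```python
import torch

H = torch.randn(4, 4); H = H @ H.T            # one curvature block (illustrative)
delta = 0.1                                    # prior precision addend
eigvals, Q = torch.linalg.eigh(H)

def matpow(e):
    # (H + delta*I)^e = Q diag((lambda + delta)^e) Q^T
    return Q @ torch.diag((eigvals + delta) ** e) @ Q.T

W = torch.randn(4, 2)
expected = torch.linalg.inv(H + delta * torch.eye(4)) @ W
assert torch.allclose(matpow(-1) @ W, expected, atol=1e-3)
```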
    -
    +
    def to_matrix(self, exponent: float = 1) ‑> torch.Tensor
    @@ -249,30 +249,30 @@

    Index

    • Super-module

    • Classes

      diff --git a/docs/utils/subnetmask.html b/docs/utils/subnetmask.html new file mode 100644 index 00000000..15781ff4 --- /dev/null +++ b/docs/utils/subnetmask.html @@ -0,0 +1,466 @@ + + + + + + +laplace.utils.subnetmask API documentation + + + + + + + + + + + + +
      +
      +
      +

      Module laplace.utils.subnetmask

      +
      +
      +
      +
      +
      +
      +
      +
      +
      +
      +

      Classes

      +
      +
      +class SubnetMask +(model) +
      +
      +

Base class for all subnetwork masks in this library (for subnetwork Laplace).

      +

      Parameters

      +
      +
      model : torch.nn.Module
      +
       
      +
      +

      Subclasses

      + +

      Instance variables

      +
      +
      var indices
      +
      +
      +
      +
      var n_params_subnet
      +
      +
      +
      +
      +

      Methods

      +
      +
      +def convert_subnet_mask_to_indices(self, subnet_mask) +
      +
      +

      Converts a subnetwork mask into subnetwork indices.

      +

      Parameters

      +
      +
      subnet_mask : torch.Tensor
      +
      a binary vector of size (n_params) where 1s locate the subnetwork parameters +within the vectorized model parameters +(i.e. torch.nn.utils.parameters_to_vector(model.parameters()))
      +
      +

      Returns

      +
      +
      subnet_mask_indices : torch.LongTensor
      +
      a vector of indices of the vectorized model parameters +(i.e. torch.nn.utils.parameters_to_vector(model.parameters())) +that define the subnetwork
      +
      +
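For concreteness, the conversion amounts to collecting the positions of the 1s (standalone illustration, not the library's implementation):

```python
import torch

subnet_mask = torch.tensor([1, 0, 0, 1, 1, 0])             # binary, size (n_params)
subnet_mask_indices = subnet_mask.nonzero(as_tuple=True)[0]
print(subnet_mask_indices)  # tensor([0, 3, 4])
```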
      +
      +def select(self, train_loader=None) +
      +
      +

      Select the subnetwork mask.

      +

      Parameters

      +
      +
train_loader : torch.utils.data.DataLoader, default=None
      +
each iterate is a training batch (X, y);
train_loader.dataset needs to be set to access N, the size of the data set
      +
      +

      Returns

      +
      +
      subnet_mask_indices : torch.LongTensor
      +
      a vector of indices of the vectorized model parameters +(i.e. torch.nn.utils.parameters_to_vector(model.parameters())) +that define the subnetwork
      +
      +
      +
      +def get_subnet_mask(self, train_loader) +
      +
      +

      Get the subnetwork mask.

      +

      Parameters

      +
      +
train_loader : torch.utils.data.DataLoader
      +
each iterate is a training batch (X, y);
train_loader.dataset needs to be set to access N, the size of the data set
      +
      +

      Returns

      +
      +
      subnet_mask : torch.Tensor
      +
      a binary vector of size (n_params) where 1s locate the subnetwork parameters +within the vectorized model parameters +(i.e. torch.nn.utils.parameters_to_vector(model.parameters()))
      +
      +
      +
      +
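A hedged end-to-end sketch of how a SubnetMask is meant to be used with the concrete subclasses below; `model` and `train_loader` are assumed to exist, and how the indices are consumed downstream is an assumption (see laplace/subnetlaplace.py for the actual interface):

```python
from laplace.utils.subnetmask import LargestMagnitudeSubnetMask

subnet_mask = LargestMagnitudeSubnetMask(model, n_params_subnet=128)
subnetwork_indices = subnet_mask.select(train_loader)
# subnetwork_indices is a torch.LongTensor indexing into
# torch.nn.utils.parameters_to_vector(model.parameters()),
# ready to be handed to the subnetwork Laplace approximation
```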
      +
      +class RandomSubnetMask +(model, n_params_subnet) +
      +
      +

      Subnetwork mask of parameters sampled uniformly at random.

      +

      Ancestors

      +
        +
      • laplace.utils.subnetmask.ScoreBasedSubnetMask
      • +
      • SubnetMask
      • +
      +

      Methods

      +
      +
      +def compute_param_scores(self, train_loader) +
      +
      +
      +
      +
      +

      Inherited members

      + +
      +
      +class LargestMagnitudeSubnetMask +(model, n_params_subnet) +
      +
      +

      Subnetwork mask identifying the parameters with the largest magnitude.

      +

      Ancestors

      +
        +
      • laplace.utils.subnetmask.ScoreBasedSubnetMask
      • +
      • SubnetMask
      • +
      +

      Methods

      +
      +
      +def compute_param_scores(self, train_loader) +
      +
      +
      +
      +
      +

      Inherited members

      + +
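The selection rule is simple enough to state as a sketch (illustrative, not the class's code): score every parameter by its absolute value and keep the top scorers.

```python
import torch
from torch.nn.utils import parameters_to_vector

def largest_magnitude_indices(model, n_params_subnet):
    # score = |theta| over the vectorized parameters; keep the top-k,
    # returned as sorted indices into the parameter vector
    scores = parameters_to_vector(model.parameters()).abs()
    return scores.topk(n_params_subnet).indices.sort().values
```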
      +
      +class LargestVarianceDiagLaplaceSubnetMask +(model, n_params_subnet, diag_laplace_model) +
      +
      +

      Subnetwork mask identifying the parameters with the largest marginal variances +(estimated using a diagonal Laplace approximation over all model parameters).

      +

      Parameters

      +
      +
      model : torch.nn.Module
      +
       
      +
      n_params_subnet : int
      +
      number of parameters in the subnetwork (i.e. number of top-scoring parameters to select)
      +
      diag_laplace_model : DiagLaplace
      +
      diagonal Laplace model to use for variance estimation
      +
      +

      Ancestors

      +
        +
      • laplace.utils.subnetmask.ScoreBasedSubnetMask
      • +
      • SubnetMask
      • +
      +

      Methods

      +
      +
      +def compute_param_scores(self, train_loader) +
      +
      +
      +
      +
      +

      Inherited members

      + +
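A hedged sketch of the criterion: fit the diagonal Laplace approximation over all weights, then rank parameters by marginal posterior variance. The `fit`/`posterior_variance` calls follow the DiagLaplace interface as I understand it; treat the exact attribute names as assumptions.

```python
# `diag_laplace_model`, `train_loader` and `n_params_subnet` assumed to exist
diag_laplace_model.fit(train_loader)
scores = diag_laplace_model.posterior_variance   # one variance per parameter
indices = scores.topk(n_params_subnet).indices.sort().values
```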
      +
      +class LargestVarianceSWAGSubnetMask +(model, n_params_subnet, likelihood='classification', swag_n_snapshots=40, swag_snapshot_freq=1, swag_lr=0.01) +
      +
      +

      Subnetwork mask identifying the parameters with the largest marginal variances +(estimated using diagonal SWAG over all model parameters).

      +

      Parameters

      +
      +
      model : torch.nn.Module
      +
       
      +
      n_params_subnet : int
      +
      number of parameters in the subnetwork (i.e. number of top-scoring parameters to select)
      +
      likelihood : str
      +
      'classification' or 'regression'
      +
      swag_n_snapshots : int
      +
      number of model snapshots to collect for SWAG
      +
      swag_snapshot_freq : int
      +
      SWAG snapshot collection frequency (in epochs)
      +
      swag_lr : float
      +
      learning rate for SWAG snapshot collection
      +
      +

      Ancestors

      +
        +
      • laplace.utils.subnetmask.ScoreBasedSubnetMask
      • +
      • SubnetMask
      • +
      +

      Methods

      +
      +
      +def compute_param_scores(self, train_loader) +
      +
      +
      +
      +
      +

      Inherited members

      + +
      +
      +class ParamNameSubnetMask +(model, parameter_names) +
      +
      +

      Subnetwork mask corresponding to the specified parameters of the neural network.

      +

      Parameters

      +
      +
      model : torch.nn.Module
      +
       
      +
      parameter_names : List[str]
      +
      list of names of the parameters (as in model.named_parameters()) +that define the subnetwork
      +
      +

      Ancestors

      + +

      Methods

      +
      +
      +def get_subnet_mask(self, train_loader) +
      +
      +

      Get the subnetwork mask identifying the specified parameters.

      +
      +
      +

      Inherited members

      + +
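An illustrative construction of such a name-based mask (hypothetical helper, not the class's implementation):

```python
import torch

def param_name_mask(model, parameter_names):
    # 1 for every entry of a parameter whose name is listed, 0 otherwise
    return torch.cat([torch.full((p.numel(),), float(name in parameter_names))
                      for name, p in model.named_parameters()])

# e.g. param_name_mask(model, ['fc.weight', 'fc.bias'])
```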
      +
      +class ModuleNameSubnetMask +(model, module_names) +
      +
      +

      Subnetwork mask corresponding to the specified modules of the neural network.

      +

      Parameters

      +
      +
      model : torch.nn.Module
      +
       
      +
module_names : List[str]
      +
list of names of the modules (as in model.named_modules()) that define the subnetwork;
the modules cannot have children, i.e. they need to be leaf modules
      +
      +

      Ancestors

      + +

      Subclasses

      + +

      Methods

      +
      +
      +def get_subnet_mask(self, train_loader) +
      +
      +

      Get the subnetwork mask identifying the specified modules.

      +
      +
      +

      Inherited members

      + +
      +
      +class LastLayerSubnetMask +(model, last_layer_name=None) +
      +
      +

      Subnetwork mask corresponding to the last layer of the neural network.

      +

      Parameters

      +
      +
      model : torch.nn.Module
      +
       
      +
      last_layer_name : str, default=None
      +
name of the model's last layer; if None, it will be determined automatically
      +
      +

      Ancestors

      + +

      Methods

      +
      +
      +def get_subnet_mask(self, train_loader) +
      +
      +

      Get the subnetwork mask identifying the last layer.

      +
      +
      +

      Inherited members

      + +
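A hedged illustration of the automatic case: the last layer can be located as the final parameterized leaf module in registration order (the library resolves this more carefully; this is only a sketch, and registration order need not match forward order):

```python
from laplace.utils.subnetmask import LastLayerSubnetMask

# `model` and `train_loader` assumed to exist
leaf_with_params = [name for name, m in model.named_modules()
                    if not list(m.children()) and list(m.parameters(recurse=False))]
mask = LastLayerSubnetMask(model, last_layer_name=leaf_with_params[-1])
indices = mask.select(train_loader)
```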
      +
      +
      +
      + +
      + + + \ No newline at end of file diff --git a/docs/utils/swag.html b/docs/utils/swag.html new file mode 100644 index 00000000..9f1e1843 --- /dev/null +++ b/docs/utils/swag.html @@ -0,0 +1,102 @@ + + + + + + +laplace.utils.swag API documentation + + + + + + + + + + + + +
      +
      +
      +

      Module laplace.utils.swag

      +
      +
      +
      +
      +
      +
      +
      +
      +

      Functions

      +
      +
      +def fit_diagonal_swag_var(model, train_loader, criterion, n_snapshots_total=40, snapshot_freq=1, lr=0.01, momentum=0.9, weight_decay=0.0003, min_var=1e-30) +
      +
      +

Fit diagonal SWAG [1], which estimates marginal variances of model parameters by
computing the first and second moments of SGD iterates with a large learning rate.

      +

      Implementation partly adapted from: +- https://github.com/wjmaddox/swa_gaussian/blob/master/swag/posteriors/swag.py +- https://github.com/wjmaddox/swa_gaussian/blob/master/experiments/train/run_swag.py

      +

      References

      +

[1] Maddox, W. J., Garipov, T., Izmailov, P., Vetrov, D., Wilson, A. G.
A Simple Baseline for Bayesian Uncertainty in Deep Learning.
NeurIPS 2019.

      +

      Parameters

      +
      +
      model : torch.nn.Module
      +
       
      +
train_loader : torch.utils.data.DataLoader
      +
      training data loader to use for snapshot collection
      +
      criterion : torch.nn.CrossEntropyLoss or torch.nn.MSELoss
      +
      loss function to use for snapshot collection
      +
      n_snapshots_total : int
      +
      total number of model snapshots to collect
      +
      snapshot_freq : int
      +
      snapshot collection frequency (in epochs)
      +
      lr : float
      +
      SGD learning rate for collecting snapshots
      +
      momentum : float
      +
      SGD momentum
      +
      weight_decay : float
      +
      SGD weight decay
      +
      min_var : float
      +
      minimum parameter variance to clamp to (for numerical stability)
      +
      +

      Returns

      +
      +
      param_variances : torch.Tensor
      +
      vector of marginal variances for each model parameter
      +
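The heart of the estimate described above, as a simplified sketch (assuming snapshots of parameters_to_vector(model.parameters()) are collected during high-learning-rate SGD; not the function's actual code):

```python
import torch

def diag_swag_variance(snapshots, min_var=1e-30):
    # snapshots: list of flattened parameter vectors, one per collected snapshot
    thetas = torch.stack(snapshots)        # (n_snapshots, n_params)
    mean = thetas.mean(dim=0)              # first moment  E[theta]
    sq_mean = (thetas ** 2).mean(dim=0)    # second moment E[theta^2]
    return (sq_mean - mean ** 2).clamp(min=min_var)  # Var[theta], clamped
```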
      +
      +
      +
      +
      +
      +
      + +
      + + + \ No newline at end of file diff --git a/docs/utils.html b/docs/utils/utils.html similarity index 84% rename from docs/utils.html rename to docs/utils/utils.html index 633a3565..aa721218 100644 --- a/docs/utils.html +++ b/docs/utils/utils.html @@ -4,7 +4,7 @@ -laplace.utils API documentation +laplace.utils.utils API documentation @@ -20,7 +20,7 @@
      -

      Module laplace.utils

      +

      Module laplace.utils.utils

      @@ -31,19 +31,19 @@

      Module laplace.utils

      Functions

      -
      +
      def get_nll(out_dist, targets)
      -
      +
      def validate(laplace, val_loader, pred_type='glm', link_approx='probit', n_samples=100)
      -
      +
      def parameters_per_layer(model)
      @@ -59,7 +59,7 @@

      Returns

       
    -
    +
    def invsqrt_precision(M)
    @@ -75,7 +75,7 @@

    Returns

     
    -
    +
    def kron(t1, t2)
    @@ -93,7 +93,7 @@

    Returns

     
    -
    +
    def diagonal_add_scalar(X, value)
    @@ -111,7 +111,7 @@

    Returns

     
    -
    +
    def symeig(M)
    @@ -130,7 +130,7 @@

    Returns

    eigenvectors
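A hedged sketch of what a safeguarded symmetric eigendecomposition looks like, assuming symeig wraps torch.linalg.eigh and guards the PSD requirement of the surrounding code against numerically negative eigenvalues (an assumption about the implementation):

```python
import torch

def symeig_psd(M):
    eigvals, eigvecs = torch.linalg.eigh(M)      # M assumed symmetric
    return eigvals.clamp(min=0.0), eigvecs       # clamp tiny negatives to zero
```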
    -
    +
    def block_diag(blocks)
    @@ -146,7 +146,7 @@

    Returns

     
    -
    +
    def expand_prior_precision(prior_prec, model)
    @@ -160,7 +160,7 @@

    Parameters

    Returns

    -
    expanded_prior_prec : torch.Tensor
    +
    expanded_prior_prec : torch.Tensor
    expanded prior precision has the same shape as model parameters
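The intended semantics, inferred from the description above, can be sketched as broadcasting one precision per parameter group to one per parameter (illustrative helper, not the library's code):

```python
import torch

def expand_layerwise(prior_prec, model):
    # prior_prec: tensor with one entry per parameter group of `model`
    return torch.cat([delta.expand(p.numel())
                      for delta, p in zip(prior_prec, model.parameters())])
```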
    @@ -177,20 +177,20 @@

    Index