diff --git a/examples/dcgan.py b/examples/dcgan.py
index c1065f12..585e145a 100644
--- a/examples/dcgan.py
+++ b/examples/dcgan.py
@@ -201,7 +201,7 @@ def __init__(self, ngpu):
             nn.ReLU(True),
             # state size. (ngf) x 32 x 32
             nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
-            nn.Tanh()
+            nn.Tanh(),
             # state size. (nc) x 64 x 64
         )
diff --git a/opacus/accountants/analysis/prv/domain.py b/opacus/accountants/analysis/prv/domain.py
index 5e86502d..220dbbd9 100644
--- a/opacus/accountants/analysis/prv/domain.py
+++ b/opacus/accountants/analysis/prv/domain.py
@@ -12,6 +12,7 @@ class Domain:
     Stores relevant information about the domain on which PRVs are discretized,
     and includes a few convenience methods for manipulating it.
     """
+
     t_min: float
     t_max: float
     size: int
diff --git a/opacus/layers/dp_rnn.py b/opacus/layers/dp_rnn.py
index 5f5c6819..d1d5b61f 100644
--- a/opacus/layers/dp_rnn.py
+++ b/opacus/layers/dp_rnn.py
@@ -405,9 +405,10 @@ def forward(
             for direction, (cell, h0, c0) in directions:
                 # apply single direction layer (with dropout)
                 out_layer, h, c = self.forward_layer(
-                    x
-                    if layer == 0
-                    else output,  # [T, B, D/H/2H] / tuple T x [B, D/H/2H]
+                    (
+                        x if layer == 0 else output
+                        # [T, B, D/H/2H] / tuple T x [B, D/H/2H]
+                    ),
                     h0,  # [B, H]
                     c0,
                     batch_sizes,
diff --git a/opacus/privacy_engine.py b/opacus/privacy_engine.py
index 0ca6811b..555769d0 100644
--- a/opacus/privacy_engine.py
+++ b/opacus/privacy_engine.py
@@ -536,9 +536,9 @@ def save_checkpoint(
         if noise_scheduler is not None:
             checkpoint_dict["noise_scheduler_state_dict"] = noise_scheduler.state_dict()
         if grad_clip_scheduler is not None:
-            checkpoint_dict[
-                "grad_clip_scheduler_state_dict"
-            ] = grad_clip_scheduler.state_dict()
+            checkpoint_dict["grad_clip_scheduler_state_dict"] = (
+                grad_clip_scheduler.state_dict()
+            )

         torch.save(checkpoint_dict, path, **(torch_save_kwargs or {}))
diff --git a/opacus/tests/grad_samples/conv2d_test.py b/opacus/tests/grad_samples/conv2d_test.py
index b0deae3f..d1178506 100644
--- a/opacus/tests/grad_samples/conv2d_test.py
+++ b/opacus/tests/grad_samples/conv2d_test.py
@@ -90,9 +90,9 @@ def test_conv2d(
         # Test 'convolution as a backward' GSM
        # 'convolution as a backward' doesn't support padding=same
         conv2d_gsm = GradSampleModule.GRAD_SAMPLERS[nn.Conv2d]
-        GradSampleModule.GRAD_SAMPLERS[
-            nn.Conv2d
-        ] = convolution2d_backward_as_a_convolution
+        GradSampleModule.GRAD_SAMPLERS[nn.Conv2d] = (
+            convolution2d_backward_as_a_convolution
+        )
         self.run_test(
             x,
             conv,
diff --git a/opacus/tests/privacy_engine_test.py b/opacus/tests/privacy_engine_test.py
index 022acfa3..f2ed1a32 100644
--- a/opacus/tests/privacy_engine_test.py
+++ b/opacus/tests/privacy_engine_test.py
@@ -87,9 +87,11 @@ def _init_vanilla_training(
     ):
         model = self._init_model()
         optimizer = torch.optim.SGD(
-            model.parameters()
-            if not opt_exclude_frozen
-            else [p for p in model.parameters() if p.requires_grad],
+            (
+                model.parameters()
+                if not opt_exclude_frozen
+                else [p for p in model.parameters() if p.requires_grad]
+            ),
             lr=self.LR,
             momentum=0,
         )
@@ -112,9 +114,11 @@ def _init_private_training(
         model = self._init_model()
         model = PrivacyEngine.get_compatible_module(model)
         optimizer = torch.optim.SGD(
-            model.parameters()
-            if not opt_exclude_frozen
-            else [p for p in model.parameters() if p.requires_grad],
+            (
+                model.parameters()
+                if not opt_exclude_frozen
+                else [p for p in model.parameters() if p.requires_grad]
+            ),
             lr=self.LR,
             momentum=0,
         )
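
Every hunk above applies the same mechanical change: a multi-line conditional expression, subscript target, or long right-hand side gets wrapped in its own parentheses (the style newer Black releases enforce), with no change in runtime behavior. A minimal runnable sketch of the wrapped-conditional pattern from privacy_engine_test.py, using a hypothetical toy model in place of the test fixtures:

    # Sketch only: model, flag, and lr are illustrative stand-ins,
    # not values from the test suite.
    import torch
    import torch.nn as nn

    model = nn.Linear(4, 2)
    model.bias.requires_grad = False  # freeze one parameter
    opt_exclude_frozen = True

    optimizer = torch.optim.SGD(
        (
            model.parameters()
            if not opt_exclude_frozen
            else [p for p in model.parameters() if p.requires_grad]
        ),
        lr=0.05,
        momentum=0,
    )
    # Only the trainable weight tensor ends up in the param group:
    print([p.shape for g in optimizer.param_groups for p in g["params"]])

The extra parentheses let the ternary split cleanly across lines without bare continuation lines, which is why the comma after `else output` in dp_rnn.py moves outside the new parenthesized group.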