diff --git a/pl_bolts/models/self_supervised/cpc/cpc_module.py b/pl_bolts/models/self_supervised/cpc/cpc_module.py
index 926a4f6b25..d325fc55cf 100644
--- a/pl_bolts/models/self_supervised/cpc/cpc_module.py
+++ b/pl_bolts/models/self_supervised/cpc/cpc_module.py
@@ -124,7 +124,7 @@ def forward(self, img_1):
         Z = self.encoder(img_1)
 
         # non cpc resnets return a list
-        if self.hparams.encoder != "cpc_encoder":
+        if self.hparams.encoder_name != "cpc_encoder":
             Z = Z[0]
 
         # (?) -> (b, -1, nb_feats, nb_feats)
diff --git a/tests/models/self_supervised/test_models.py b/tests/models/self_supervised/test_models.py
index 03d3f98170..59f0ca0519 100644
--- a/tests/models/self_supervised/test_models.py
+++ b/tests/models/self_supervised/test_models.py
@@ -15,9 +15,7 @@
 from tests import _MARK_REQUIRE_GPU
 
 
-# todo: seems to be failing on GH Actions for min config
 @pytest.mark.skipif(**_MARK_REQUIRE_GPU)
-@pytest.mark.skip(reason="RuntimeError: Given groups=1, weight of size [256, 2048, 1, 1], expected input[2, 1, 32, 32]")
 def test_cpcv2(tmpdir, datadir):
     datamodule = CIFAR10DataModule(data_dir=datadir, num_workers=0, batch_size=2)
     datamodule.train_transforms = CPCTrainTransformsCIFAR10()
@@ -30,7 +28,12 @@ def test_cpcv2(tmpdir, datadir):
         online_ft=True,
         num_classes=datamodule.num_classes,
     )
-    trainer = Trainer(fast_dev_run=True, default_root_dir=tmpdir)
+
+    # FIXME: workaround for bug caused by
+    # https://github.com/PyTorchLightning/lightning-bolts/commit/2e903c333c37ea83394c7da2ce826de1b82fb356
+    model.datamodule = datamodule
+
+    trainer = Trainer(fast_dev_run=True, default_root_dir=tmpdir, gpus=1 if torch.cuda.device_count() > 0 else 0)
     trainer.fit(model, datamodule=datamodule)