From 83a5ef97b99faebcfccfb02a122a0b93784deae5 Mon Sep 17 00:00:00 2001
From: Martin Jankowiak
Date: Thu, 2 Feb 2023 14:21:23 -0800
Subject: [PATCH] rerun newest black on repo

---
 examples/air/air.py | 4 ----
 examples/air/main.py | 2 --
 examples/air/modules.py | 2 +-
 examples/contrib/funsor/hmm.py | 1 +
 examples/contrib/mue/FactorMuE.py | 1 -
 examples/contrib/mue/ProfileHMM.py | 1 -
 examples/contrib/oed/ab_test.py | 1 -
 examples/cvae/baseline.py | 1 -
 examples/cvae/cvae.py | 2 --
 examples/cvae/util.py | 2 --
 examples/dmm.py | 2 --
 examples/einsum.py | 4 ----
 examples/hmm.py | 1 +
 examples/mixed_hmm/experiment.py | 3 ---
 examples/mixed_hmm/model.py | 4 ----
 examples/mixed_hmm/seal_data.py | 1 -
 examples/rsa/hyperbole.py | 1 -
 examples/rsa/schelling_false.py | 1 -
 examples/rsa/search_inference.py | 1 -
 examples/rsa/semantic_parsing.py | 1 -
 examples/vae/ss_vae_M2.py | 8 +-------
 examples/vae/utils/custom_mlp.py | 2 --
 examples/vae/utils/mnist_cached.py | 1 -
 pyro/contrib/funsor/handlers/enum_messenger.py | 2 +-
 pyro/contrib/funsor/handlers/named_messenger.py | 2 --
 pyro/contrib/funsor/handlers/runtime.py | 1 -
 pyro/contrib/funsor/infer/discrete.py | 1 -
 pyro/contrib/funsor/infer/traceenum_elbo.py | 2 --
 pyro/contrib/gp/models/sgpr.py | 1 -
 pyro/contrib/mue/dataloaders.py | 3 ---
 pyro/contrib/mue/models.py | 4 ----
 pyro/contrib/oed/eig.py | 7 -------
 pyro/contrib/oed/glmm/glmm.py | 1 -
 pyro/contrib/oed/glmm/guides.py | 7 -------
 pyro/contrib/timeseries/gp.py | 1 -
 pyro/contrib/tracking/dynamic_models.py | 1 -
 pyro/distributions/diag_normal_mixture.py | 1 -
 pyro/distributions/diag_normal_mixture_shared_cov.py | 1 -
 pyro/distributions/sine_bivariate_von_mises.py | 1 -
 pyro/infer/mcmc/util.py | 1 -
 pyro/infer/tracegraph_elbo.py | 5 -----
 pyro/ops/contract.py | 1 -
 pyro/optim/optim.py | 1 -
 pyro/poutine/block_messenger.py | 1 -
 pyro/poutine/do_messenger.py | 1 -
 pyro/poutine/enum_messenger.py | 2 --
 pyro/poutine/handlers.py | 1 -
 pyro/poutine/runtime.py | 2 --
 tests/contrib/autoname/test_autoname.py | 1 -
 tests/contrib/autoname/test_scoping.py | 1 -
 tests/contrib/funsor/test_infer_discrete.py | 2 --
 tests/contrib/funsor/test_named_handlers.py | 1 -
 tests/contrib/funsor/test_valid_models_enum.py | 1 -
 tests/contrib/funsor/test_vectorized_markov.py | 3 ---
 tests/contrib/mue/test_dataloaders.py | 2 --
 tests/contrib/mue/test_missingdatahmm.py | 6 ------
 tests/contrib/mue/test_statearrangers.py | 2 --
 tests/contrib/oed/test_finite_spaces_eig.py | 3 ---
 tests/contrib/oed/test_linear_models_eig.py | 3 ---
 tests/distributions/test_cuda.py | 3 ---
 tests/infer/autoguide/test_inference.py | 1 -
 tests/infer/test_autoguide.py | 2 --
 tests/infer/test_compute_downstream_costs.py | 1 -
 tests/infer/test_discrete.py | 1 -
 tests/infer/test_enum.py | 1 -
 tests/infer/test_sampling.py | 3 ---
 tests/infer/test_smcfilter.py | 1 -
 tests/infer/test_valid_models.py | 1 -
 tests/nn/test_module.py | 1 -
 tests/poutine/test_counterfactual.py | 1 -
 tests/poutine/test_poutines.py | 5 -----
 71 files changed, 5 insertions(+), 139 deletions(-)

diff --git a/examples/air/air.py b/examples/air/air.py
index ae0a026911..61a9ce70b8 100644
--- a/examples/air/air.py
+++ b/examples/air/air.py
@@ -57,7 +57,6 @@ def __init__(
         likelihood_sd=0.3,
         use_cuda=False,
     ):
-
         super().__init__()

         self.num_steps = num_steps
@@ -127,7 +126,6 @@ def __init__(
         self.cuda()

     def prior(self, n, **kwargs):
-
         state = ModelState(
             x=torch.zeros(n, self.x_size, self.x_size, **self.options),
             z_pres=torch.ones(n, self.z_pres_size, **self.options),
@@ -145,7 +143,6 @@ def prior(self, n, **kwargs):
         return (z_where, z_pres), state.x

     def prior_step(self, t, n, prev, z_pres_prior_p=default_z_pres_prior_p):
-
         # Sample presence indicators.
         z_pres = pyro.sample(
             "z_pres_{}".format(t),
@@ -263,7 +260,6 @@ def guide(self, data, batch_size, **kwargs):
         return z_where, z_pres

     def guide_step(self, t, n, prev, inputs):
-
         rnn_input = torch.cat(
             (inputs["embed"], prev.z_where, prev.z_what, prev.z_pres), 1
         )
diff --git a/examples/air/main.py b/examples/air/main.py
index 55239fad9e..37abef531e 100644
--- a/examples/air/main.py
+++ b/examples/air/main.py
@@ -128,7 +128,6 @@ def load_data():


 def main(**kwargs):
-
     args = argparse.Namespace(**kwargs)

     if "save" in args:
@@ -229,7 +228,6 @@ def per_param_optim_args(param_name):
     examples_to_viz = X[5:10]

     for i in range(1, args.num_steps + 1):
-
         loss = svi.step(
             X, batch_size=args.batch_size, z_pres_prior_p=partial(z_pres_prior_p, i)
         )
diff --git a/examples/air/modules.py b/examples/air/modules.py
index bb57c52cd4..b35a66353a 100644
--- a/examples/air/modules.py
+++ b/examples/air/modules.py
@@ -50,7 +50,7 @@ def __init__(
         layers = []
         in_sizes = [in_size] + out_sizes[0:-1]
         sizes = list(zip(in_sizes, out_sizes))
-        for (i, o) in sizes[0:-1]:
+        for i, o in sizes[0:-1]:
             layers.append(nn.Linear(i, o))
             layers.append(non_linear_layer())
         layers.append(nn.Linear(sizes[-1][0], sizes[-1][1]))
diff --git a/examples/contrib/funsor/hmm.py b/examples/contrib/funsor/hmm.py
index f77db02bfa..2dce627e48 100644
--- a/examples/contrib/funsor/hmm.py
+++ b/examples/contrib/funsor/hmm.py
@@ -178,6 +178,7 @@ def model_0(sequences, lengths, args, batch_size=None, include_prior=True):
 # and randomly subsample data to size batch_size. To add jit support we
 # silence some warnings and try to avoid dynamic program structure.

+
 # Note that this is the "HMM" model in reference [1] (with the difference that
 # in [1] the probabilities probs_x and probs_y are not MAP-regularized with
 # Dirichlet and Beta distributions for any of the models)
diff --git a/examples/contrib/mue/FactorMuE.py b/examples/contrib/mue/FactorMuE.py
index 1a06e97fcc..cd5ec11035 100644
--- a/examples/contrib/mue/FactorMuE.py
+++ b/examples/contrib/mue/FactorMuE.py
@@ -60,7 +60,6 @@ def generate_data(small_test, include_stop, device):


 def main(args):
-
     # Load dataset.
     if args.cpu_data or not args.cuda:
         device = torch.device("cpu")
diff --git a/examples/contrib/mue/ProfileHMM.py b/examples/contrib/mue/ProfileHMM.py
index a7e09353ce..1226cb42b8 100644
--- a/examples/contrib/mue/ProfileHMM.py
+++ b/examples/contrib/mue/ProfileHMM.py
@@ -64,7 +64,6 @@ def generate_data(small_test, include_stop, device):


 def main(args):
-
     pyro.set_rng_seed(args.rng_seed)

     # Load dataset.
diff --git a/examples/contrib/oed/ab_test.py b/examples/contrib/oed/ab_test.py index 75bf77c048..720a0f8157 100644 --- a/examples/contrib/oed/ab_test.py +++ b/examples/contrib/oed/ab_test.py @@ -92,7 +92,6 @@ def true_ape(ns): def main(num_vi_steps, num_bo_steps, seed): - pyro.set_rng_seed(seed) pyro.clear_param_store() diff --git a/examples/cvae/baseline.py b/examples/cvae/baseline.py index 6c4a0a18e0..68695c6d5e 100644 --- a/examples/cvae/baseline.py +++ b/examples/cvae/baseline.py @@ -48,7 +48,6 @@ def train( early_stop_patience, model_path, ): - # Train baseline baseline_net = BaselineNet(500, 500) baseline_net.to(device) diff --git a/examples/cvae/cvae.py b/examples/cvae/cvae.py index e6a6fd8eda..5f38a7ad93 100644 --- a/examples/cvae/cvae.py +++ b/examples/cvae/cvae.py @@ -70,7 +70,6 @@ def model(self, xs, ys=None): pyro.module("generation_net", self) batch_size = xs.shape[0] with pyro.plate("data"): - # Prior network uses the baseline predictions as initial guess. # This is the generative process with recurrent connection with torch.no_grad(): @@ -130,7 +129,6 @@ def train( model_path, pre_trained_baseline_net, ): - # clear param store pyro.clear_param_store() diff --git a/examples/cvae/util.py b/examples/cvae/util.py index 57caa55e1d..2e86238f66 100644 --- a/examples/cvae/util.py +++ b/examples/cvae/util.py @@ -47,7 +47,6 @@ def visualize( num_samples, image_path=None, ): - # Load sample random data datasets, _, dataset_sizes = get_data( num_quadrant_inputs=num_quadrant_inputs, batch_size=num_images @@ -121,7 +120,6 @@ def generate_table( num_particles, col_name, ): - # Load sample random data datasets, dataloaders, dataset_sizes = get_data( num_quadrant_inputs=num_quadrant_inputs, batch_size=32 diff --git a/examples/dmm.py b/examples/dmm.py index 026a1a735d..f25c402b49 100644 --- a/examples/dmm.py +++ b/examples/dmm.py @@ -208,7 +208,6 @@ def model( mini_batch_seq_lengths, annealing_factor=1.0, ): - # this is the number of time steps we need to process in the mini-batch T_max = mini_batch.size(1) @@ -269,7 +268,6 @@ def guide( mini_batch_seq_lengths, annealing_factor=1.0, ): - # this is the number of time steps we need to process in the mini-batch T_max = mini_batch.size(1) # register all PyTorch (sub)modules with pyro diff --git a/examples/einsum.py b/examples/einsum.py index 1c257d00f8..e61137a811 100644 --- a/examples/einsum.py +++ b/examples/einsum.py @@ -43,7 +43,6 @@ def jit_prob(equation, *operands, **kwargs): """ key = "prob", equation, kwargs["plates"] if key not in _CACHE: - # This simply wraps einsum for jit compilation. def _einsum(*operands): return einsum(equation, *operands, **kwargs) @@ -61,7 +60,6 @@ def jit_logprob(equation, *operands, **kwargs): """ key = "logprob", equation, kwargs["plates"] if key not in _CACHE: - # This simply wraps einsum for jit compilation. def _einsum(*operands): return einsum( @@ -81,7 +79,6 @@ def jit_gradient(equation, *operands, **kwargs): """ key = "gradient", equation, kwargs["plates"] if key not in _CACHE: - # This wraps einsum for jit compilation, but we will call backward on the result. def _einsum(*operands): return einsum( @@ -114,7 +111,6 @@ def _jit_adjoint(equation, *operands, **kwargs): backend = kwargs.pop("backend", "pyro.ops.einsum.torch_sample") key = backend, equation, tuple(x.shape for x in operands), kwargs["plates"] if key not in _CACHE: - # This wraps a complete adjoint algorithm call. 
@ignore_jit_warnings() def _forward_backward(*operands): diff --git a/examples/hmm.py b/examples/hmm.py index 4689be23c7..79ffe4289f 100644 --- a/examples/hmm.py +++ b/examples/hmm.py @@ -167,6 +167,7 @@ def model_0(sequences, lengths, args, batch_size=None, include_prior=True): # and randomly subsample data to size batch_size. To add jit support we # silence some warnings and try to avoid dynamic program structure. + # Note that this is the "HMM" model in reference [1] (with the difference that # in [1] the probabilities probs_x and probs_y are not MAP-regularized with # Dirichlet and Beta distributions for any of the models) diff --git a/examples/mixed_hmm/experiment.py b/examples/mixed_hmm/experiment.py index e7df74cbc9..65f900c9d1 100644 --- a/examples/mixed_hmm/experiment.py +++ b/examples/mixed_hmm/experiment.py @@ -35,7 +35,6 @@ def _size(tensor): def run_expt(args): - data_dir = args["folder"] dataset = "seal" # args["dataset"] seed = args["seed"] @@ -79,7 +78,6 @@ def run_expt(args): schedule_step_loss = True for t in range(timesteps): - optimizer.zero_grad() loss = loss_fn(model, guide) loss.backward() @@ -166,7 +164,6 @@ def closure(): if __name__ == "__main__": - parser = argparse.ArgumentParser() parser.add_argument("-g", "--group", default="none", type=str) parser.add_argument("-i", "--individual", default="none", type=str) diff --git a/examples/mixed_hmm/model.py b/examples/mixed_hmm/model.py index ed032dd0ac..1a87c23e65 100644 --- a/examples/mixed_hmm/model.py +++ b/examples/mixed_hmm/model.py @@ -48,7 +48,6 @@ def guide_generic(config): N_c = config["sizes"]["group"] with pyro.plate("group", N_c, dim=-1): - if config["group"]["random"] == "continuous": pyro.sample( "eps_g", @@ -59,7 +58,6 @@ def guide_generic(config): with pyro.plate("individual", N_s, dim=-2), poutine.mask( mask=config["individual"]["mask"] ): - # individual-level random effects if config["individual"]["random"] == "continuous": pyro.sample( @@ -158,7 +156,6 @@ def model_generic(config): N_c = config["sizes"]["group"] with pyro.plate("group", N_c, dim=-1): - # group-level random effects if config["group"]["random"] == "discrete": # group-level discrete effect @@ -179,7 +176,6 @@ def model_generic(config): with pyro.plate("individual", N_s, dim=-2), poutine.mask( mask=config["individual"]["mask"] ): - # individual-level random effects if config["individual"]["random"] == "discrete": # individual-level discrete effect diff --git a/examples/mixed_hmm/seal_data.py b/examples/mixed_hmm/seal_data.py index 5a24a0c56d..a7023750c9 100644 --- a/examples/mixed_hmm/seal_data.py +++ b/examples/mixed_hmm/seal_data.py @@ -18,7 +18,6 @@ def download_seal_data(filename): def prepare_seal(filename, random_effects): - if not os.path.exists(filename): download_seal_data(filename) diff --git a/examples/rsa/hyperbole.py b/examples/rsa/hyperbole.py index 8076818cd4..581098882e 100644 --- a/examples/rsa/hyperbole.py +++ b/examples/rsa/hyperbole.py @@ -202,7 +202,6 @@ def test_truth(): def main(args): - # test_truth() pragmatic_marginal = pragmatic_listener(args.price) diff --git a/examples/rsa/schelling_false.py b/examples/rsa/schelling_false.py index b06ab1b988..a2344fbc1f 100644 --- a/examples/rsa/schelling_false.py +++ b/examples/rsa/schelling_false.py @@ -67,7 +67,6 @@ def bob(preference, depth): def main(args): - # Here Alice and Bob slightly prefer one location over the other a priori shared_preference = torch.tensor([args.preference]) diff --git a/examples/rsa/search_inference.py b/examples/rsa/search_inference.py index 
e61e2ebbac..01f0e786ba 100644 --- a/examples/rsa/search_inference.py +++ b/examples/rsa/search_inference.py @@ -168,7 +168,6 @@ def sample_escape(tr, site): ) def _fn(*args, **kwargs): - for i in range(int(1e6)): assert ( not queue.empty() diff --git a/examples/rsa/semantic_parsing.py b/examples/rsa/semantic_parsing.py index c9813662d1..807b66f799 100644 --- a/examples/rsa/semantic_parsing.py +++ b/examples/rsa/semantic_parsing.py @@ -325,7 +325,6 @@ def literal_listener_raw(utterance, qud): def main(args): - mll = Marginal(literal_listener_raw, num_samples=args.num_samples) def is_any_qud(world): diff --git a/examples/vae/ss_vae_M2.py b/examples/vae/ss_vae_M2.py index 7fbc51c089..881db60e02 100644 --- a/examples/vae/ss_vae_M2.py +++ b/examples/vae/ss_vae_M2.py @@ -51,7 +51,6 @@ def __init__( use_cuda=False, aux_loss_multiplier=None, ): - super().__init__() # initialize the class with all arguments provided to the constructor @@ -68,7 +67,6 @@ def __init__( self.setup_networks() def setup_networks(self): - z_dim = self.z_dim hidden_sizes = self.hidden_layers @@ -127,7 +125,6 @@ def model(self, xs, ys=None): batch_size = xs.size(0) options = dict(dtype=xs.dtype, device=xs.device) with pyro.plate("data"): - # sample the handwriting style from the constant prior distribution prior_loc = torch.zeros(batch_size, self.z_dim, **options) prior_scale = torch.ones(batch_size, self.z_dim, **options) @@ -167,7 +164,6 @@ def guide(self, xs, ys=None): """ # inform Pyro that the variables in the batch of xs, ys are conditionally independent with pyro.plate("data"): - # if the class label (the digit) is not supervised, sample # (and score) the digit with the variational distribution # q(y|x) = categorical(alpha(x)) @@ -245,7 +241,6 @@ def run_inference_for_epoch(data_loaders, losses, periodic_interval_batches): # count the number of supervised batches seen in this epoch ctr_sup = 0 for i in range(batches_per_epoch): - # whether this batch is supervised or not is_supervised = (i % periodic_interval_batches == 1) and ctr_sup < sup_batches @@ -277,7 +272,7 @@ def get_accuracy(data_loader, classifier_fn, batch_size): predictions, actuals = [], [] # use the appropriate data loader - for (xs, ys) in data_loader: + for xs, ys in data_loader: # use classification function to compute all predictions for each batch predictions.append(classifier_fn(xs)) actuals.append(ys) @@ -370,7 +365,6 @@ def main(args): # run inference for a certain number of epochs for i in range(0, args.num_epochs): - # get the losses for an epoch epoch_losses_sup, epoch_losses_unsup = run_inference_for_epoch( data_loaders, losses, periodic_interval_batches diff --git a/examples/vae/utils/custom_mlp.py b/examples/vae/utils/custom_mlp.py index 0767d7f1b0..5330244ceb 100644 --- a/examples/vae/utils/custom_mlp.py +++ b/examples/vae/utils/custom_mlp.py @@ -168,13 +168,11 @@ def __init__( else output_activation ) else: - # we're going to have a bunch of separate layers we can spit out (a tuple of outputs) out_layers = [] # multiple outputs? 
handle separately for out_ix, out_size in enumerate(output_size): - # for a single output object, we create a linear layer and some weights split_layer = [] diff --git a/examples/vae/utils/mnist_cached.py b/examples/vae/utils/mnist_cached.py index d7ad578fda..212596fc20 100644 --- a/examples/vae/utils/mnist_cached.py +++ b/examples/vae/utils/mnist_cached.py @@ -150,7 +150,6 @@ def target_transform(y): ], "invalid train/test option values" if mode in ["sup", "unsup", "valid"]: - # transform the training data if transformations are provided if transform is not None: self.data = transform(self.data.float()) diff --git a/pyro/contrib/funsor/handlers/enum_messenger.py b/pyro/contrib/funsor/handlers/enum_messenger.py index 3815f1934e..89fcd7e93b 100644 --- a/pyro/contrib/funsor/handlers/enum_messenger.py +++ b/pyro/contrib/funsor/handlers/enum_messenger.py @@ -234,10 +234,10 @@ def queue( :param num_samples: optional number of extended traces for extend_fn to return :returns: stochastic function decorated with poutine logic """ + # TODO rewrite this to use purpose-built trace/replay handlers def wrapper(wrapped): def _fn(*args, **kwargs): - for i in range(max_tries): assert ( not queue.empty() diff --git a/pyro/contrib/funsor/handlers/named_messenger.py b/pyro/contrib/funsor/handlers/named_messenger.py index ff65a18223..4d96539e12 100644 --- a/pyro/contrib/funsor/handlers/named_messenger.py +++ b/pyro/contrib/funsor/handlers/named_messenger.py @@ -59,7 +59,6 @@ def __exit__(self, *args, **kwargs): @staticmethod # only depends on the global _DIM_STACK state, not self def _pyro_to_data(msg): - (funsor_value,) = msg["args"] name_to_dim = msg["kwargs"].setdefault("name_to_dim", OrderedDict()) dim_type = msg["kwargs"].setdefault("dim_type", DimType.LOCAL) @@ -82,7 +81,6 @@ def _pyro_to_data(msg): @staticmethod # only depends on the global _DIM_STACK state, not self def _pyro_to_funsor(msg): - if len(msg["args"]) == 2: raw_value, output = msg["args"] else: diff --git a/pyro/contrib/funsor/handlers/runtime.py b/pyro/contrib/funsor/handlers/runtime.py index d1d3a8081a..5ec3372308 100644 --- a/pyro/contrib/funsor/handlers/runtime.py +++ b/pyro/contrib/funsor/handlers/runtime.py @@ -181,7 +181,6 @@ def _genvalue(self, key, value_request): ) def allocate(self, key_to_value_request): - # step 1: split into fresh and non-fresh key_to_value = OrderedDict() for key, value_request in tuple(key_to_value_request.items()): diff --git a/pyro/contrib/funsor/infer/discrete.py b/pyro/contrib/funsor/infer/discrete.py index 3518882c16..8ed42323d8 100644 --- a/pyro/contrib/funsor/infer/discrete.py +++ b/pyro/contrib/funsor/infer/discrete.py @@ -13,7 +13,6 @@ def _sample_posterior(model, first_available_dim, temperature, *args, **kwargs): - if temperature == 0: sum_op, prod_op = funsor.ops.max, funsor.ops.add approx = funsor.approximations.argmax_approximate diff --git a/pyro/contrib/funsor/infer/traceenum_elbo.py b/pyro/contrib/funsor/infer/traceenum_elbo.py index 4655bc09ac..0680aa2ceb 100644 --- a/pyro/contrib/funsor/infer/traceenum_elbo.py +++ b/pyro/contrib/funsor/infer/traceenum_elbo.py @@ -92,7 +92,6 @@ def terms_from_trace(tr): @copy_docs_from(_OrigTraceEnum_ELBO) class TraceMarkovEnum_ELBO(ELBO): def differentiable_loss(self, model, guide, *args, **kwargs): - # get batched, enumerated, to_funsor-ed traces from the guide and model with plate( size=self.num_particles @@ -170,7 +169,6 @@ def differentiable_loss(self, model, guide, *args, **kwargs): @copy_docs_from(_OrigTraceEnum_ELBO) class TraceEnum_ELBO(ELBO): def 
differentiable_loss(self, model, guide, *args, **kwargs): - # get batched, enumerated, to_funsor-ed traces from the guide and model with plate( size=self.num_particles diff --git a/pyro/contrib/gp/models/sgpr.py b/pyro/contrib/gp/models/sgpr.py index f07b14f46c..d98c7e96ab 100644 --- a/pyro/contrib/gp/models/sgpr.py +++ b/pyro/contrib/gp/models/sgpr.py @@ -98,7 +98,6 @@ class SparseGPRegression(GPModel): def __init__( self, X, y, kernel, Xu, noise=None, mean_function=None, approx=None, jitter=1e-6 ): - assert isinstance( X, torch.Tensor ), "X needs to be a torch Tensor instead of a {}".format(type(X)) diff --git a/pyro/contrib/mue/dataloaders.py b/pyro/contrib/mue/dataloaders.py index 82370b2694..d041b325bb 100644 --- a/pyro/contrib/mue/dataloaders.py +++ b/pyro/contrib/mue/dataloaders.py @@ -62,7 +62,6 @@ def __init__( include_stop=False, device=None, ): - super().__init__() # Determine device @@ -135,11 +134,9 @@ def _one_hot(self, seq, alphabet, length): return x def __len__(self): - return self.data_size def __getitem__(self, ind): - return (self.seq_data[ind], self.L_data[ind]) diff --git a/pyro/contrib/mue/models.py b/pyro/contrib/mue/models.py index 7cf36aa3cd..ee30cd7387 100644 --- a/pyro/contrib/mue/models.py +++ b/pyro/contrib/mue/models.py @@ -77,7 +77,6 @@ def __init__( self.statearrange = Profile(latent_seq_length) def model(self, seq_data, local_scale): - # Latent sequence. precursor_seq = pyro.sample( "precursor_seq", @@ -316,7 +315,6 @@ def __init__(self, data_length, alphabet_length, z_dim): self.f1_sd = nn.Linear(self.input_size, z_dim) def forward(self, data): - data = data.reshape(-1, self.input_size) z_loc = self.f1_mn(data) z_scale = softplus(self.f1_sd(data)) @@ -452,7 +450,6 @@ def __init__( self.statearrange = Profile(latent_seq_length) def decoder(self, z, W, B, inverse_temp): - # Project. v = torch.mm(z, W) + B @@ -489,7 +486,6 @@ def decoder(self, z, W, B, inverse_temp): return out def model(self, seq_data, local_scale, local_prior_scale): - # ARD prior. 
if self.ARD_prior: # Relevance factors diff --git a/pyro/contrib/oed/eig.py b/pyro/contrib/oed/eig.py index 35f3f6511c..81f4b17f51 100644 --- a/pyro/contrib/oed/eig.py +++ b/pyro/contrib/oed/eig.py @@ -537,7 +537,6 @@ def _posterior_ape( *args, **kwargs ): - loss = _posterior_loss( model, guide, observation_labels, target_labels, *args, **kwargs ) @@ -834,7 +833,6 @@ def opt_eig_ape_loss( final_design=None, final_num_samples=None, ): - if final_design is None: final_design = design if final_num_samples is None: @@ -889,7 +887,6 @@ def _donsker_varadhan_loss(model, T, observation_labels, target_labels): ewma_log = EwmaLog(alpha=0.90) def loss_fn(design, num_particles, **kwargs): - try: pyro.module("T", T) except AssertionError: @@ -933,7 +930,6 @@ def _posterior_loss( """Posterior loss: to evaluate directly use `posterior_eig` setting `num_steps=0`, `eig=False`.""" def loss_fn(design, num_particles, evaluation=False, **kwargs): - expanded_design = lexpand(design, num_particles) # Sample from p(y, theta | d) @@ -970,7 +966,6 @@ def _marginal_loss(model, guide, observation_labels, target_labels): """Marginal loss: to evaluate directly use `marginal_eig` setting `num_steps=0`.""" def loss_fn(design, num_particles, evaluation=False, **kwargs): - expanded_design = lexpand(design, num_particles) # Sample from p(y | d) @@ -1002,7 +997,6 @@ def _marginal_likelihood_loss( """Marginal_likelihood loss: to evaluate directly use `marginal_likelihood_eig` setting `num_steps=0`.""" def loss_fn(design, num_particles, evaluation=False, **kwargs): - expanded_design = lexpand(design, num_particles) # Sample from p(y | d) @@ -1043,7 +1037,6 @@ def _lfire_loss( """LFIRE loss: to evaluate directly use `lfire_eig` setting `num_steps=0`.""" def loss_fn(design, num_particles, evaluation=False, **kwargs): - try: pyro.module("h", h) except AssertionError: diff --git a/pyro/contrib/oed/glmm/glmm.py b/pyro/contrib/oed/glmm/glmm.py index bb4f172855..00e8ee2efa 100644 --- a/pyro/contrib/oed/glmm/glmm.py +++ b/pyro/contrib/oed/glmm/glmm.py @@ -22,7 +22,6 @@ def known_covariance_linear_model( coef_means, coef_sds, observation_sd, coef_labels="w", observation_label="y" ): - if not isinstance(coef_means, list): coef_means = [coef_means] if not isinstance(coef_sds, list): diff --git a/pyro/contrib/oed/glmm/guides.py b/pyro/contrib/oed/glmm/guides.py index 2643ae2108..698643547a 100644 --- a/pyro/contrib/oed/glmm/guides.py +++ b/pyro/contrib/oed/glmm/guides.py @@ -69,12 +69,10 @@ def __init__( self.softplus = nn.Softplus() def get_params(self, y_dict, design, target_labels): - y = torch.cat(list(y_dict.values()), dim=-1) return self.linear_model_formula(y, design, target_labels) def linear_model_formula(self, y, design, target_labels): - if self.use_softplus: mu = {l: rmv(self.softplus(self.regressor[l]), y) for l in target_labels} else: @@ -84,7 +82,6 @@ def linear_model_formula(self, y, design, target_labels): return mu, scale_tril def forward(self, y_dict, design, observation_labels, target_labels): - pyro.module("posterior_guide", self) # Returns two dicts from labels -> tensors @@ -222,7 +219,6 @@ def __init__(self, d, n, w_sizes, **kwargs): self.h1_bias = nn.Parameter(torch.zeros(n)) def get_params(self, y_dict, design, target_labels): - y = torch.cat(list(y_dict.values()), dim=-1) # Approx invert transformation on y in expectation @@ -254,7 +250,6 @@ def __init__( self.tau_label = tau_label def get_params(self, y_dict, design, target_labels): - y = torch.cat(list(y_dict.values()), dim=-1) coefficient_labels = [ @@ 
-270,7 +265,6 @@ def get_params(self, y_dict, design, target_labels): return mu, scale_tril, self.alpha, beta def forward(self, y_dict, design, observation_labels, target_labels): - pyro.module("ba_guide", self) mu, scale_tril, alpha, beta = self.get_params(y_dict, design, target_labels) @@ -303,7 +297,6 @@ def __init__(self, guide): self.guide = guide def forward(self, design, trace, observation_labels, target_labels): - trace.compute_log_prob() prior_lp = sum(trace.nodes[l]["log_prob"] for l in target_labels) y_dict = {l: trace.nodes[l]["value"] for l in observation_labels} diff --git a/pyro/contrib/timeseries/gp.py b/pyro/contrib/timeseries/gp.py index 0c2eb71061..440ba0683f 100644 --- a/pyro/contrib/timeseries/gp.py +++ b/pyro/contrib/timeseries/gp.py @@ -385,7 +385,6 @@ def __init__( length_scale_init=None, obs_noise_scale_init=None, ): - if nu != 1.5: raise NotImplementedError("The only supported value of nu is 1.5") diff --git a/pyro/contrib/tracking/dynamic_models.py b/pyro/contrib/tracking/dynamic_models.py index 3b212d8a3a..cdefc504be 100644 --- a/pyro/contrib/tracking/dynamic_models.py +++ b/pyro/contrib/tracking/dynamic_models.py @@ -377,7 +377,6 @@ def process_noise_cov(self, dt=0.0): stochastic integration (for use with EKF). """ if dt not in self._Q_cache: - with torch.no_grad(): d = self._dimension dt2 = dt * dt diff --git a/pyro/distributions/diag_normal_mixture.py b/pyro/distributions/diag_normal_mixture.py index 4a7d09b1db..c4f96f0551 100644 --- a/pyro/distributions/diag_normal_mixture.py +++ b/pyro/distributions/diag_normal_mixture.py @@ -147,7 +147,6 @@ def forward(ctx, locs, scales, component_logits, pis, which, noise_shape): @staticmethod @once_differentiable def backward(ctx, grad_output): - z, scales, locs, logits, pis = ctx.saved_tensors dim = scales.size(-1) K = logits.size(-1) diff --git a/pyro/distributions/diag_normal_mixture_shared_cov.py b/pyro/distributions/diag_normal_mixture_shared_cov.py index f8ee2ed357..c9c9d23adb 100644 --- a/pyro/distributions/diag_normal_mixture_shared_cov.py +++ b/pyro/distributions/diag_normal_mixture_shared_cov.py @@ -147,7 +147,6 @@ def forward(ctx, locs, coord_scale, component_logits, pis, which, noise_shape): @staticmethod @once_differentiable def backward(ctx, grad_output): - z, coord_scale, locs, component_logits, pis = ctx.saved_tensors K = component_logits.size(-1) batch_dims = coord_scale.dim() - 1 diff --git a/pyro/distributions/sine_bivariate_von_mises.py b/pyro/distributions/sine_bivariate_von_mises.py index c4a624400f..7fbe5f3320 100644 --- a/pyro/distributions/sine_bivariate_von_mises.py +++ b/pyro/distributions/sine_bivariate_von_mises.py @@ -88,7 +88,6 @@ def __init__( weighted_correlation=None, validate_args=None, ): - assert (correlation is None) != (weighted_correlation is None) if weighted_correlation is not None: diff --git a/pyro/infer/mcmc/util.py b/pyro/infer/mcmc/util.py index da1566fd7a..1d60e7b15f 100644 --- a/pyro/infer/mcmc/util.py +++ b/pyro/infer/mcmc/util.py @@ -693,7 +693,6 @@ def predictive(model, posterior_samples, *args, **kwargs): reshaped_samples = {} for name, sample in posterior_samples.items(): - batch_size, sample_shape = sample.shape[0], sample.shape[1:] if num_samples is None: diff --git a/pyro/infer/tracegraph_elbo.py b/pyro/infer/tracegraph_elbo.py index 40c77e1367..597d05f237 100644 --- a/pyro/infer/tracegraph_elbo.py +++ b/pyro/infer/tracegraph_elbo.py @@ -46,7 +46,6 @@ def _get_baseline_options(site): def _construct_baseline(node, guide_site, downstream_cost): - # XXX should the 
average baseline be in the param store as below? baseline = 0.0 @@ -177,7 +176,6 @@ def _compute_downstream_costs(model_trace, guide_trace, non_reparam_nodes): # def _compute_elbo(model_trace, guide_trace): - # In ref [1], section 3.2, the part of the surrogate loss computed here is # \sum{cost}, which in this case is the ELBO. Instead of using the ELBO, # this implementation uses a surrogate ELBO which modifies some entropy @@ -362,12 +360,10 @@ def loss_and_grads(self, model, guide, *args, **kwargs): return loss def _loss_and_surrogate_loss(self, model, guide, args, kwargs): - loss = 0.0 surrogate_loss = 0.0 for model_trace, guide_trace in self._get_traces(model, guide, args, kwargs): - lp, slp = self._loss_and_surrogate_loss_particle(model_trace, guide_trace) loss += lp surrogate_loss += slp @@ -378,7 +374,6 @@ def _loss_and_surrogate_loss(self, model, guide, args, kwargs): return loss, surrogate_loss def _loss_and_surrogate_loss_particle(self, model_trace, guide_trace): - elbo, surrogate_loss = _compute_elbo(model_trace, guide_trace) return elbo, surrogate_loss diff --git a/pyro/ops/contract.py b/pyro/ops/contract.py index 5d01779332..9a23386308 100644 --- a/pyro/ops/contract.py +++ b/pyro/ops/contract.py @@ -120,7 +120,6 @@ def _contract_component(ring, tensor_tree, sum_dims, target_dims): # Split terms at the current ordinal into connected components. for terms, dims in _partition_terms(ring, leaf_terms, leaf_dims): - # Eliminate sum dims via a sumproduct contraction. term = ring.sumproduct(terms, dims - local_dims) diff --git a/pyro/optim/optim.py b/pyro/optim/optim.py index 69f2d84da7..97db2c2632 100644 --- a/pyro/optim/optim.py +++ b/pyro/optim/optim.py @@ -231,7 +231,6 @@ def _get_grad_clip_args(self, param: str) -> Dict: # if we were passed a fct, we call fct with param info # arguments are (module name, param name) e.g. 
('mymodule', 'bias') if callable(self.pt_clip_args): - # get param name param_name = pyro.get_param_store().param_name(param) module_name = module_from_param_with_module_name(param_name) diff --git a/pyro/poutine/block_messenger.py b/pyro/poutine/block_messenger.py index e15f0a885b..bdd1deb709 100644 --- a/pyro/poutine/block_messenger.py +++ b/pyro/poutine/block_messenger.py @@ -21,7 +21,6 @@ def _block_fn(expose, expose_types, hide, hide_types, hide_all, msg): or (msg_type in hide_types) or (is_not_exposed and hide_all) ): # noqa: E129 - return True # otherwise expose else: diff --git a/pyro/poutine/do_messenger.py b/pyro/poutine/do_messenger.py index b89c52698c..dc2ac173e6 100644 --- a/pyro/poutine/do_messenger.py +++ b/pyro/poutine/do_messenger.py @@ -58,7 +58,6 @@ def _pyro_sample(self, msg): msg.get("_intervener_id", None) != self._intervener_id and self.data.get(msg["name"]) is not None ): - if msg.get("_intervener_id", None) is not None: warnings.warn( "Attempting to intervene on variable {} multiple times," diff --git a/pyro/poutine/enum_messenger.py b/pyro/poutine/enum_messenger.py index 234c0764b8..1046e30910 100644 --- a/pyro/poutine/enum_messenger.py +++ b/pyro/poutine/enum_messenger.py @@ -33,7 +33,6 @@ def _tmc_mixture_sample(msg): # if this site has any possible ancestors, sample ancestor indices uniformly thin_sample = fat_sample if thin_sample.shape != target_shape: - index = [Ellipsis] + [slice(None)] * (len(thin_sample.shape) - 1) squashed_dims = [] for squashed_dim, squashed_size in zip( @@ -79,7 +78,6 @@ def _tmc_diagonal_sample(msg): # if this site has any ancestors, choose ancestors from diagonal approximation thin_sample = fat_sample if thin_sample.shape != target_shape: - index = [Ellipsis] + [slice(None)] * (len(thin_sample.shape) - 1) squashed_dims = [] for squashed_dim, squashed_size in zip( diff --git a/pyro/poutine/handlers.py b/pyro/poutine/handlers.py index fbe62079af..122c3f2a4a 100644 --- a/pyro/poutine/handlers.py +++ b/pyro/poutine/handlers.py @@ -189,7 +189,6 @@ def queue( def wrapper(wrapped): def _fn(*args, **kwargs): - for i in range(max_tries): assert ( not queue.empty() diff --git a/pyro/poutine/runtime.py b/pyro/poutine/runtime.py index 59b27c8911..69c198c723 100644 --- a/pyro/poutine/runtime.py +++ b/pyro/poutine/runtime.py @@ -207,7 +207,6 @@ def apply_stack(initial_msg): pointer = 0 # go until time to stop? 
for frame in reversed(stack): - pointer = pointer + 1 frame._process_message(msg) @@ -253,7 +252,6 @@ def effectful(fn=None, type=None): @functools.wraps(fn) def _fn(*args, **kwargs): - name = kwargs.pop("name", None) infer = kwargs.pop("infer", {}) diff --git a/tests/contrib/autoname/test_autoname.py b/tests/contrib/autoname/test_autoname.py index 71524e7f19..753c3b5155 100644 --- a/tests/contrib/autoname/test_autoname.py +++ b/tests/contrib/autoname/test_autoname.py @@ -356,7 +356,6 @@ def geometric(p): def test_no_param(): - pyro.clear_param_store() @autoname diff --git a/tests/contrib/autoname/test_scoping.py b/tests/contrib/autoname/test_scoping.py index b0d445927d..09d9de29e3 100644 --- a/tests/contrib/autoname/test_scoping.py +++ b/tests/contrib/autoname/test_scoping.py @@ -170,7 +170,6 @@ def f2(): def test_no_param(): - pyro.clear_param_store() @scope diff --git a/tests/contrib/funsor/test_infer_discrete.py b/tests/contrib/funsor/test_infer_discrete.py index bdfcf71f7c..62c82e4e19 100644 --- a/tests/contrib/funsor/test_infer_discrete.py +++ b/tests/contrib/funsor/test_infer_discrete.py @@ -32,7 +32,6 @@ @pytest.mark.parametrize("temperature", [0, 1]) @pyroapi.pyro_backend(_PYRO_BACKEND) def test_hmm_smoke(length, temperature): - # This should match the example in the infer_discrete docstring. def hmm(data, hidden_dim=10): transition = 0.3 / hidden_dim + 0.7 * torch.eye(hidden_dim) @@ -325,7 +324,6 @@ def model_zzxx(): def model2(): - data = [torch.tensor([-1.0, -1.0, 0.0]), torch.tensor([-1.0, 1.0])] p = pyro.param("p", torch.tensor([0.25, 0.75])) loc = pyro.sample("loc", dist.Normal(0, 1).expand([2]).to_event(1)) diff --git a/tests/contrib/funsor/test_named_handlers.py b/tests/contrib/funsor/test_named_handlers.py index ea447b59d2..1330676d81 100644 --- a/tests/contrib/funsor/test_named_handlers.py +++ b/tests/contrib/funsor/test_named_handlers.py @@ -51,7 +51,6 @@ def testing(): def test_nesting(): def testing(): - with pyro.markov(): v1 = pyro.to_data( Tensor(torch.ones(2), OrderedDict([(str(1), funsor.Bint[2])]), "real") diff --git a/tests/contrib/funsor/test_valid_models_enum.py b/tests/contrib/funsor/test_valid_models_enum.py index 8ad27d6bdd..bf93528038 100644 --- a/tests/contrib/funsor/test_valid_models_enum.py +++ b/tests/contrib/funsor/test_valid_models_enum.py @@ -92,7 +92,6 @@ def assert_ok(model, guide=None, max_plate_nesting=None, **kwargs): def _check_traces(tr_pyro, tr_funsor): - assert tr_pyro.nodes.keys() == tr_funsor.nodes.keys() tr_pyro.compute_log_prob() tr_funsor.compute_log_prob() diff --git a/tests/contrib/funsor/test_vectorized_markov.py b/tests/contrib/funsor/test_vectorized_markov.py index 9fc24fdd43..1bb79e5308 100644 --- a/tests/contrib/funsor/test_vectorized_markov.py +++ b/tests/contrib/funsor/test_vectorized_markov.py @@ -728,7 +728,6 @@ def test_model_enumerated_elbo(model, guide, data, history): pyro.clear_param_store() with pyro_backend("contrib.funsor"): - model = infer.config_enumerate(model, default="parallel") elbo = infer.TraceEnum_ELBO(max_plate_nesting=4) expected_loss = elbo.loss_and_grads(model, guide, data, history, False) @@ -763,7 +762,6 @@ def test_model_enumerated_elbo_multi(model, guide, weeks_data, days_data, histor pyro.clear_param_store() with pyro_backend("contrib.funsor"): - model = infer.config_enumerate(model, default="parallel") elbo = infer.TraceEnum_ELBO(max_plate_nesting=4) expected_loss = elbo.loss_and_grads( @@ -831,7 +829,6 @@ def test_guide_enumerated_elbo(model, guide, data, history): NotImplementedError, 
match="TraceMarkovEnum_ELBO does not yet support guide side Markov enumeration", ): - if history > 1: pytest.xfail(reason="TraceMarkovEnum_ELBO does not yet support history > 1") diff --git a/tests/contrib/mue/test_dataloaders.py b/tests/contrib/mue/test_dataloaders.py index 00d7f4e6f0..7accfbdf1a 100644 --- a/tests/contrib/mue/test_dataloaders.py +++ b/tests/contrib/mue/test_dataloaders.py @@ -11,7 +11,6 @@ @pytest.mark.parametrize("alphabet", ["amino-acid", "dna", "ATC"]) @pytest.mark.parametrize("include_stop", [False, True]) def test_biosequencedataset(source_type, alphabet, include_stop): - # Define dataset. seqs = ["AATC", "CA", "T"] @@ -73,7 +72,6 @@ def test_biosequencedataset(source_type, alphabet, include_stop): def test_write(): - # Define dataset. seqs = ["AATC*C", "CA*", "T**"] dataset = BiosequenceDataset(seqs, "list", "ACGT*", include_stop=False) diff --git a/tests/contrib/mue/test_missingdatahmm.py b/tests/contrib/mue/test_missingdatahmm.py index 1224e7b872..f68074d033 100644 --- a/tests/contrib/mue/test_missingdatahmm.py +++ b/tests/contrib/mue/test_missingdatahmm.py @@ -9,7 +9,6 @@ def test_hmm_log_prob(): - a0 = torch.tensor([0.9, 0.08, 0.02]) a = torch.tensor([[0.1, 0.8, 0.1], [0.5, 0.3, 0.2], [0.4, 0.4, 0.2]]) e = torch.tensor([[0.99, 0.01], [0.01, 0.99], [0.5, 0.5]]) @@ -83,7 +82,6 @@ def test_hmm_log_prob(): @pytest.mark.parametrize("batch_observation", [False, True]) @pytest.mark.parametrize("batch_data", [False, True]) def test_shapes(batch_initial, batch_transition, batch_observation, batch_data): - # Dimensions. batch_size = 3 state_dim, observation_dim, num_steps = 4, 5, 6 @@ -239,7 +237,6 @@ def test_samples(batch_data): def indiv_filter(a0, a, e, x): - alph = torch.zeros((x.shape[0], a0.shape[0])) for j in range(a0.shape[0]): vec = a0[j] @@ -258,7 +255,6 @@ def indiv_filter(a0, a, e, x): def indiv_smooth(a0, a, e, x): - alph = indiv_filter(a0, a, e, x) beta = torch.zeros(alph.shape) beta[-1, :] = 1.0 @@ -300,7 +296,6 @@ def indiv_map_states(a0, a, e, x): def test_state_infer(): - # HMM parameters. a0 = torch.tensor([0.9, 0.08, 0.02]) a = torch.tensor([[0.1, 0.8, 0.1], [0.5, 0.3, 0.2], [0.4, 0.4, 0.2]]) @@ -498,7 +493,6 @@ def test_sample_given_states(): def test_sample_states(): - # Effectively deterministic to check sampler. eps = 1e-10 a0 = torch.tensor([1 - eps, eps / 2, eps / 2]) diff --git a/tests/contrib/mue/test_statearrangers.py b/tests/contrib/mue/test_statearrangers.py index 9df91e819e..2b0a03acad 100644 --- a/tests/contrib/mue/test_statearrangers.py +++ b/tests/contrib/mue/test_statearrangers.py @@ -19,7 +19,6 @@ def simpleprod(lst): @pytest.mark.parametrize("batch_size", [None, 5]) @pytest.mark.parametrize("substitute", [False, True]) def test_profile_alternate_imp(M, batch_size, substitute): - # --- Setup random model. 
--- pf_arranger = Profile(M) @@ -199,7 +198,6 @@ def test_profile_alternate_imp(M, batch_size, substitute): def test_profile_shapes( batch_ancestor_seq, batch_insert_seq, batch_insert, batch_delete, batch_substitute ): - M, D, B = 5, 2, 3 K = 2 * M + 1 batch_size = 6 diff --git a/tests/contrib/oed/test_finite_spaces_eig.py b/tests/contrib/oed/test_finite_spaces_eig.py index c5c1aa82dc..5d2d2c9841 100644 --- a/tests/contrib/oed/test_finite_spaces_eig.py +++ b/tests/contrib/oed/test_finite_spaces_eig.py @@ -47,20 +47,17 @@ def true_eig(): def posterior_guide(y_dict, design, observation_labels, target_labels): - y = torch.cat(list(y_dict.values()), dim=-1) a, b = pyro.param("a", torch.tensor(0.0)), pyro.param("b", torch.tensor(0.0)) pyro.sample("theta", dist.Bernoulli(logits=a + b * y)) def marginal_guide(design, observation_labels, target_labels): - logit_p = pyro.param("logit_p", torch.tensor(0.0)) pyro.sample("y", dist.Bernoulli(logits=logit_p)) def likelihood_guide(theta_dict, design, observation_labels, target_labels): - theta = torch.cat(list(theta_dict.values()), dim=-1) a, b = pyro.param("a", torch.tensor(0.0)), pyro.param("b", torch.tensor(0.0)) pyro.sample("y", dist.Bernoulli(logits=a + b * theta)) diff --git a/tests/contrib/oed/test_linear_models_eig.py b/tests/contrib/oed/test_linear_models_eig.py index 7cb8aa4e29..a6cfa67e91 100644 --- a/tests/contrib/oed/test_linear_models_eig.py +++ b/tests/contrib/oed/test_linear_models_eig.py @@ -42,7 +42,6 @@ def one_point_design(): def posterior_guide(y_dict, design, observation_labels, target_labels): - y = torch.cat(list(y_dict.values()), dim=-1) A = pyro.param("A", torch.zeros(2, 3)) scale_tril = pyro.param( @@ -55,7 +54,6 @@ def posterior_guide(y_dict, design, observation_labels, target_labels): def marginal_guide(design, observation_labels, target_labels): - mu = pyro.param("mu", torch.zeros(3)) scale_tril = pyro.param( "scale_tril", @@ -66,7 +64,6 @@ def marginal_guide(design, observation_labels, target_labels): def likelihood_guide(theta_dict, design, observation_labels, target_labels): - theta = torch.cat(list(theta_dict.values()), dim=-1) centre = rmv(design, theta) diff --git a/tests/distributions/test_cuda.py b/tests/distributions/test_cuda.py index 4e8bd70068..bd4d4ba044 100644 --- a/tests/distributions/test_cuda.py +++ b/tests/distributions/test_cuda.py @@ -16,7 +16,6 @@ @requires_cuda def test_sample(dist): for idx in range(len(dist.dist_params)): - # Compute CPU value. with tensors_default_to("cpu"): params = dist.get_dist_params(idx) @@ -41,7 +40,6 @@ def test_rsample(dist): if not dist.pyro_dist.has_rsample: return for idx in range(len(dist.dist_params)): - # Compute CPU value. with tensors_default_to("cpu"): params = dist.get_dist_params(idx) @@ -81,7 +79,6 @@ def test_rsample(dist): @requires_cuda def test_log_prob(dist): for idx in range(len(dist.dist_params)): - # Compute CPU value. 
with tensors_default_to("cpu"): data = dist.get_test_data(idx) diff --git a/tests/infer/autoguide/test_inference.py b/tests/infer/autoguide/test_inference.py index 228c8443ad..17c0ffbeb1 100644 --- a/tests/infer/autoguide/test_inference.py +++ b/tests/infer/autoguide/test_inference.py @@ -31,7 +31,6 @@ # conjugate model to test AutoGuide logic from end-to-end (this has a non-mean-field posterior) class AutoGaussianChain(GaussianChain): - # this is gross but we need to convert between different posterior factorizations def compute_target(self, N): self.target_auto_mus = torch.zeros(N + 1) diff --git a/tests/infer/test_autoguide.py b/tests/infer/test_autoguide.py index bf6e7499e9..c508f028e3 100644 --- a/tests/infer/test_autoguide.py +++ b/tests/infer/test_autoguide.py @@ -1139,7 +1139,6 @@ def model(x, y=None, batch_size=None): @pytest.mark.parametrize("init_fn", [None, init_to_mean, init_to_median]) @pytest.mark.parametrize("auto_class", [AutoDelta, AutoNormal, AutoGuideList]) def test_subsample_guide(auto_class, init_fn): - # The model from tutorial/source/easyguide.ipynb def model(batch, subsample, full_size): num_time_steps = len(batch) @@ -1190,7 +1189,6 @@ def create_plates(batch, subsample, full_size): @pytest.mark.parametrize("independent", [True, False], ids=["independent", "dependent"]) @pytest.mark.parametrize("auto_class", [AutoDelta, AutoNormal]) def test_subsample_guide_2(auto_class, independent): - # Simplified from Model2 in tutorial/source/forecasting_iii.ipynb def model(data): size, size = data.shape diff --git a/tests/infer/test_compute_downstream_costs.py b/tests/infer/test_compute_downstream_costs.py index 3382a53ebf..4d6ddbf5f2 100644 --- a/tests/infer/test_compute_downstream_costs.py +++ b/tests/infer/test_compute_downstream_costs.py @@ -20,7 +20,6 @@ def _brute_force_compute_downstream_costs( model_trace, guide_trace, non_reparam_nodes # ): - guide_nodes = [ x for x in guide_trace.nodes if guide_trace.nodes[x]["type"] == "sample" ] diff --git a/tests/infer/test_discrete.py b/tests/infer/test_discrete.py index 67f5dde732..416ecba73e 100644 --- a/tests/infer/test_discrete.py +++ b/tests/infer/test_discrete.py @@ -328,7 +328,6 @@ def model(num_particles=1, z=None): ids=["map", "sample", "sample-elbo"], ) def test_hmm_smoke(infer, temperature, length): - # This should match the example in the infer_discrete docstring. 
def hmm(data, hidden_dim=10): transition = 0.3 / hidden_dim + 0.7 * torch.eye(hidden_dim) diff --git a/tests/infer/test_enum.py b/tests/infer/test_enum.py index 76a6d1b870..be176c9ca9 100644 --- a/tests/infer/test_enum.py +++ b/tests/infer/test_enum.py @@ -4011,7 +4011,6 @@ def guide(): @pytest.mark.parametrize("num_samples", [10000, 100000]) def test_vectorized_importance(num_samples): - pyro.param( "model_probs_a", torch.tensor([0.45, 0.55]), constraint=constraints.simplex ) diff --git a/tests/infer/test_sampling.py b/tests/infer/test_sampling.py index d28dcd14c4..bd1ef247fe 100644 --- a/tests/infer/test_sampling.py +++ b/tests/infer/test_sampling.py @@ -15,7 +15,6 @@ class HMMSamplingTestCase(TestCase): def setUp(self): - # simple Gaussian-emission HMM def model(): p_latent = pyro.param("p1", torch.tensor([[0.7], [0.3]])) @@ -24,7 +23,6 @@ def model(): latents = [torch.ones(1, 1)] observes = [] for t in range(self.model_steps): - latents.append( pyro.sample( "latent_{}".format(str(t)), @@ -52,7 +50,6 @@ def model(): class NormalNormalSamplingTestCase(TestCase): def setUp(self): - pyro.clear_param_store() def model(): diff --git a/tests/infer/test_smcfilter.py b/tests/infer/test_smcfilter.py index b441e0afd3..0b095275ee 100644 --- a/tests/infer/test_smcfilter.py +++ b/tests/infer/test_smcfilter.py @@ -187,7 +187,6 @@ def score_latent(zs, ys): def test_likelihood_ratio(): - model = HarmonicModel() guide = HarmonicGuide() diff --git a/tests/infer/test_valid_models.py b/tests/infer/test_valid_models.py index 9b9a030bf0..3c6cfa8cdc 100644 --- a/tests/infer/test_valid_models.py +++ b/tests/infer/test_valid_models.py @@ -251,7 +251,6 @@ def guide(): "Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO, TraceTMC_ELBO] ) def test_set_has_rsample_ok(has_rsample, Elbo): - # This model has sparse gradients, so users may want to disable # reparametrized sampling to reduce variance of gradient estimates. # However both versions should be correct, i.e. with or without has_rsample. diff --git a/tests/nn/test_module.py b/tests/nn/test_module.py index 90b8af0eec..94a4b76ff9 100644 --- a/tests/nn/test_module.py +++ b/tests/nn/test_module.py @@ -582,7 +582,6 @@ def test_mixin_factory(): def test_to_pyro_module_(): - pyro.set_rng_seed(123) actual = nn.Sequential( nn.Linear(28 * 28, 200), diff --git a/tests/poutine/test_counterfactual.py b/tests/poutine/test_counterfactual.py index 81db18bedc..3d171bfc3c 100644 --- a/tests/poutine/test_counterfactual.py +++ b/tests/poutine/test_counterfactual.py @@ -80,7 +80,6 @@ def model(): def test_plate_duplication_smoke(): def model(N): - with pyro.plate("x_plate", N): z1 = pyro.sample( "z1", dist.MultivariateNormal(torch.zeros(2), torch.eye(2)) diff --git a/tests/poutine/test_poutines.py b/tests/poutine/test_poutines.py index aa6efb5670..9d6e0fefb4 100644 --- a/tests/poutine/test_poutines.py +++ b/tests/poutine/test_poutines.py @@ -235,7 +235,6 @@ def test_block_tutorial_case(self): class QueueHandlerDiscreteTest(TestCase): def setUp(self): - # simple Gaussian-mixture HMM def model(): probs = pyro.param("probs", torch.tensor([[0.8], [0.3]])) @@ -245,7 +244,6 @@ def model(): latents = [torch.ones(1)] observes = [] for t in range(3): - latents.append( pyro.sample( "latent_{}".format(str(t)), @@ -468,7 +466,6 @@ def test_random_module_prior_dict(self): class QueueHandlerMixedTest(TestCase): def setUp(self): - # Simple model with 1 continuous + 1 discrete + 1 continuous variable. 
def model(): p = torch.tensor([0.5]) @@ -674,7 +671,6 @@ def test_undo_uncondition(self): class EscapeHandlerTests(TestCase): def setUp(self): - # Simple model with 1 continuous + 1 discrete + 1 continuous variable. def model(): p = torch.tensor([0.5]) @@ -881,7 +877,6 @@ def model(): def test_decorator_interface_queue(): - sites = ["x", "y", "z", "_INPUT", "_RETURN"] queue = Queue() queue.put(poutine.Trace())
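
Note on the changes above: the hunks throughout this patch appear to follow the updated stable style of black (a 23.x release, judging by the commit date; the exact version is not recorded in the patch). Three patterns account for essentially all of the edits: the blank line that used to sit directly under a function or method signature is removed; redundant parentheses around tuple targets in for loops are dropped; and an extra blank line appears to be enforced before a comment block that is attached to a following top-level definition (the two one-line additions in the hmm.py examples). The snippet below is an illustrative sketch only, not part of the diff; the scale function and the literal data are made-up examples.

# Illustrative sketch (not part of the patch): the new formatting applied
# to a made-up example.
#
# Old style:
#
#     def scale(values, factor):
#
#         return [v * factor for v in values]
#
#     for (name, value) in [("a", 1), ("b", 2)]:
#         print(name, value)

# New style: no blank line directly after the signature, and no parentheses
# around the for-loop tuple target.
def scale(values, factor):
    return [v * factor for v in values]


for name, value in [("a", 1), ("b", 2)]:
    print(name, value)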