Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Use eval_batch_size for AutoEncoder loss stats #861

Merged
15 changes: 7 additions & 8 deletions morpheus/models/dfencoder/autoencoder.py
Original file line number Diff line number Diff line change
Expand Up @@ -905,15 +905,14 @@ def _fit_centralized(self, df, epochs=1, val=None, run_validation=False, use_val

# Early stopping
current_net_loss = net_loss
LOG.debug('The Current Net Loss:', current_net_loss)
LOG.debug('The Current Net Loss: %s', current_net_loss)

if current_net_loss > last_loss:
count_es += 1
LOG.debug('Early stop count:', count_es)
LOG.debug('Early stop count: %s', count_es)

if count_es >= self.patience:
LOG.debug('Early stopping: early stop count({}) >= patience({})'.format(
count_es, self.patience))
LOG.debug('Early stopping: early stop count(%s) >= patience(%s)', count_es, self.patience)
break

else:
Expand Down Expand Up @@ -1617,15 +1616,15 @@ def get_anomaly_score_losses(self, df):
"""
self.eval()

n_batches = len(df) // self.batch_size
if len(df) % self.batch_size > 0:
n_batches = len(df) // self.eval_batch_size
if len(df) % self.eval_batch_size > 0:
n_batches += 1

mse_loss_slices, bce_loss_slices, cce_loss_slices = [], [], []
with torch.no_grad():
for i in range(n_batches):
start = i * self.batch_size
stop = (i + 1) * self.batch_size
start = i * self.eval_batch_size
stop = (i + 1) * self.eval_batch_size

df_slice = df.iloc[start:stop]
data_slice = self.prepare_df(df_slice)
Expand Down