NETOBSERV-1269 refactor overlapping detection for BNF (#379)
This fix handles situations like the one described in
https://issues.redhat.com/browse/NETOBSERV-1269 and should also be a
more generic and consistent solution overall.

Change how overlapping results are managed: overlaps are now removed
from the results rather than from the query.

When fetching flows, we don't explicitly remove the overlaps; this is
handled implicitly by the existing dedup functions.
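
As a rough illustration of that idea (not the actual plugin code), merging the
original and swapped flow results and relying on key-based deduplication could
look like the sketch below; the record shape and key fields are assumptions
made for the example.

interface FlowRecord {
  // illustrative fields only, not the plugin's exact flow model
  fields: { SrcAddr: string; SrcPort: number; DstAddr: string; DstPort: number; TimeFlowEndMs: number };
}

// build a key identifying a flow so that records returned by both queries collapse to one
const flowKey = (r: FlowRecord): string =>
  [r.fields.SrcAddr, r.fields.SrcPort, r.fields.DstAddr, r.fields.DstPort, r.fields.TimeFlowEndMs].join('|');

// merge original + swapped results; overlapping records are dropped by the dedup pass
const mergeFlowResults = (original: FlowRecord[], swapped: FlowRecord[]): FlowRecord[] => {
  const seen = new Set<string>();
  return [...original, ...swapped].filter(rec => {
    const key = flowKey(rec);
    if (seen.has(key)) {
      return false;
    }
    seen.add(key);
    return true;
  });
};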

When fetching metrics, the computed metrics for any label set are the
ORIGINAL metrics + the SWAPPED metrics - the OVERLAP metrics.
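
A minimal sketch of that combination, assuming metric values are keyed by a
serialized label set (the Map-based shape and the function name are
illustrative, not the actual back-and-forth helper):

// combine per-label-set values as ORIGINAL + SWAPPED - OVERLAP
const combineMetrics = (
  original: Map<string, number>,
  swapped: Map<string, number>,
  overlap: Map<string, number>
): Map<string, number> => {
  const result = new Map<string, number>();
  const keys = new Set([...original.keys(), ...swapped.keys(), ...overlap.keys()]);
  for (const key of keys) {
    // traffic counted by both the original and swapped queries is subtracted once via the overlap query
    result.set(key, (original.get(key) ?? 0) + (swapped.get(key) ?? 0) - (overlap.get(key) ?? 0));
  }
  return result;
};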

Add tests, fix tests

Note that an adjustment is necessary in the MetricsQuerySummary test,
as the "endWithTolerance" calculation has changed slightly, modifying
the computed rates; the new values remain close to the previous ones.

Rename callback function
jotak authored Sep 8, 2023
1 parent bffb860 commit a21bd6a
Showing 9 changed files with 718 additions and 430 deletions.
44 changes: 25 additions & 19 deletions web/src/components/netflow-traffic.tsx
@@ -32,7 +32,7 @@ import { useTranslation } from 'react-i18next';
import { useTheme } from '../utils/theme-hook';
import { Record } from '../api/ipfix';
import { GenericMetric, Stats, TopologyMetrics } from '../api/loki';
-import { getGenericMetrics, getFlows, getTopologyMetrics } from '../api/routes';
+import { getGenericMetrics } from '../api/routes';
import {
DisabledFilters,
Filter,
@@ -45,13 +45,13 @@ import {
} from '../model/filters';
import {
FlowQuery,
-  groupFilters,
Match,
MetricFunction,
FlowScope,
MetricType,
PacketLoss,
-  RecordType
+  RecordType,
+  filtersToString
} from '../model/flow-query';
import { MetricScopeOptions } from '../model/metrics';
import { parseQuickFilters } from '../model/quick-filters';
@@ -142,6 +142,8 @@ import { exportToPng } from '../utils/export';
import { navigate } from './dynamic-loader/dynamic-loader';
import { LinksOverflow } from './overflow/links-overflow';
import { mergeFlowReporters } from '../utils/flows';
+import { getFetchFunctions as getBackAndForthFetch } from '../utils/back-and-forth';
+import { mergeStats } from '../utils/metrics';

export type ViewId = 'overview' | 'table' | 'topology';

@@ -339,9 +341,8 @@ export const NetflowTraffic: React.FC<{

const buildFlowQuery = React.useCallback((): FlowQuery => {
const enabledFilters = getEnabledFilters(forcedFilters || filters);
-    const groupedFilters = groupFilters(enabledFilters, match === 'any');
const query: FlowQuery = {
-      filters: groupedFilters,
+      filters: filtersToString(enabledFilters.list, match === 'any'),
limit: LIMIT_VALUES.includes(limit) ? limit : LIMIT_VALUES[0],
recordType: recordType,
dedup: !showDuplicates,
@@ -387,6 +388,13 @@
topologyOptions.groupTypes
]);

+  const getFetchFunctions = React.useCallback(() => {
+    // check back-and-forth
+    const enabledFilters = getEnabledFilters(forcedFilters || filters);
+    const matchAny = match === 'any';
+    return getBackAndForthFetch(enabledFilters, matchAny);
+  }, [forcedFilters, filters, match]);

const manageWarnings = React.useCallback(
(query: Promise<unknown>) => {
Promise.race([query, new Promise((resolve, reject) => setTimeout(reject, 2000, 'slow'))]).then(
@@ -403,20 +411,12 @@
[]
);

-  const mergeStats = (prev: Stats | undefined, current: Stats): Stats => {
-    if (!prev) {
-      return current;
-    }
-    return {
-      ...prev,
-      limitReached: prev.limitReached || current.limitReached,
-      numQueries: prev.numQueries + current.numQueries
-    };
-  };

const fetchTable = React.useCallback(
(fq: FlowQuery, droppedType: MetricType | undefined) => {
setMetrics([]);

+      const { getFlows, getTopologyMetrics } = getFetchFunctions();

// table query is based on histogram range if available
const tableQuery = { ...fq };
if (histogramRange) {
@@ -452,12 +452,15 @@
}
return Promise.all(promises);
},
-    [histogramRange, range, showHistogram, showDuplicates]
+    [histogramRange, range, showHistogram, showDuplicates, getFetchFunctions]
);

const fetchOverview = React.useCallback(
(fq: FlowQuery, droppedType: MetricType | undefined) => {
setFlows([]);

+      const { getTopologyMetrics } = getFetchFunctions();

const promises: Promise<Stats>[] = [
//get bytes or packets
getTopologyMetrics(fq, range).then(res => {
@@ -555,12 +558,15 @@
}
return Promise.all(promises);
},
-    [range, config.features]
+    [range, config.features, getFetchFunctions]
);

const fetchTopology = React.useCallback(
(fq: FlowQuery, droppedType: MetricType | undefined) => {
setFlows([]);

+      const { getTopologyMetrics } = getFetchFunctions();

const promises: Promise<Stats>[] = [
//get bytes or packets
getTopologyMetrics(fq, range).then(res => {
@@ -581,7 +587,7 @@
}
return Promise.all(promises);
},
-    [range]
+    [range, getFetchFunctions]
);

const tick = React.useCallback(() => {
@@ -28,9 +28,9 @@ describe('<MetricsQuerySummary />', () => {

it('should show summary', async () => {
const wrapper = mount(<MetricsQuerySummary {...mocks} />);
-    expect(wrapper.find('#bytesCount').last().text()).toBe('7 MB');
+    expect(wrapper.find('#bytesCount').last().text()).toBe('6.8 MB');
expect(wrapper.find('#packetsCount')).toHaveLength(0);
-    expect(wrapper.find('#bpsCount').last().text()).toBe('23.2 kBps');
+    expect(wrapper.find('#bpsCount').last().text()).toBe('22.79 kBps');
expect(wrapper.find('#lastRefresh').last().text()).toBe(now.toLocaleTimeString());
});
