diff --git a/cmd/goQuery/cmd/list.go b/cmd/goQuery/cmd/list.go index a14c4b10..f6293529 100644 --- a/cmd/goQuery/cmd/list.go +++ b/cmd/goQuery/cmd/list.go @@ -1,9 +1,11 @@ package cmd import ( + "context" "errors" "fmt" "io" + "log/slog" "os" "runtime" "strings" @@ -13,6 +15,7 @@ import ( "github.com/els0r/goProbe/pkg/goDB" "github.com/els0r/goProbe/pkg/goDB/info" "github.com/els0r/goProbe/pkg/query" + "github.com/els0r/telemetry/logging" jsoniter "github.com/json-iterator/go" "github.com/spf13/cobra" "github.com/spf13/viper" @@ -47,16 +50,36 @@ be broken up into IPv4 and IPv6 flows and the drops for that interface will be s } func listInterfacesEntrypoint(_ *cobra.Command, args []string) error { - return listInterfaces(viper.GetString(conf.QueryDBPath), args...) + return listInterfaces(context.Background(), viper.GetString(conf.QueryDBPath), viper.GetString(conf.QueryLog), args...) } // List interfaces for which data is available and show how many flows and // how much traffic was observed for each one. -func listInterfaces(dbPath string, ifaces ...string) error { +func listInterfaces(ctx context.Context, dbPath, queryLogFile string, ifaces ...string) error { queryArgs := cmdLineParams // TODO: consider making this configurable output := os.Stdout + var ifacesMetadata []*goDB.InterfaceMetadata + + logger := logging.FromContext(ctx) + + // create query logger + var qlogger *logging.L + if queryLogFile != "" { + var err error + + logger := logger.With("file", queryLogFile) + logger.Debugf("logging interface list query") + + qlogger, err = logging.New(slog.LevelInfo, logging.EncodingJSON, logging.WithFileOutput(queryLogFile)) + if err != nil { + logger.Errorf("failed to initialize query logger: %v", err) + } else { + qlogger.With("args", queryArgs).Infof("preparing interface list query") + defer qlogger.Info("interface list query finished") + } + } first, last, err := query.ParseTimeRange(queryArgs.First, queryArgs.Last) if err != nil { @@ -96,7 +119,7 @@ func listInterfaces(dbPath string, ifaces ...string) error { dbWorkerManagers = append(dbWorkerManagers, wm) } - var ifacesMetadata = make([]*goDB.InterfaceMetadata, 0, len(dbWorkerManagers)) + ifacesMetadata = make([]*goDB.InterfaceMetadata, 0, len(dbWorkerManagers)) for _, manager := range dbWorkerManagers { manager := manager diff --git a/cmd/goQuery/cmd/root.go b/cmd/goQuery/cmd/root.go index 847c5177..c47eddcf 100644 --- a/cmd/goQuery/cmd/root.go +++ b/cmd/goQuery/cmd/root.go @@ -237,10 +237,13 @@ func entrypoint(cmd *cobra.Command, args []string) (err error) { // in the arguments dbPathCfg := viper.GetString(conf.QueryDBPath) + queryCtx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM, os.Interrupt) + defer stop() + // run commands that don't require any argument // handle list flag if cmdLineParams.List { - err := listInterfaces(dbPathCfg) + err := listInterfaces(queryCtx, dbPathCfg, viper.GetString(conf.QueryLog)) if err != nil { return fmt.Errorf("failed to retrieve list of available databases: %w", err) } @@ -288,9 +291,6 @@ func entrypoint(cmd *cobra.Command, args []string) (err error) { // make sure there's protection against unbounded time intervals queryArgs = setDefaultTimeRange(&queryArgs) - queryCtx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM, os.Interrupt) - defer stop() - // get logger logger := logging.FromContext(queryCtx) diff --git a/cmd/legacy/main.go b/cmd/legacy/main.go index af31b67f..9d3671ba 100644 --- a/cmd/legacy/main.go +++ 
b/cmd/legacy/main.go @@ -1,7 +1,6 @@ package main import ( - "errors" "flag" "fmt" "io/fs" @@ -196,9 +195,9 @@ func main() { // Skip input if output already exists (unless -overwrite is specified) if !overwrite { - destPath := gpfile.GenPathForTimestamp(filepath.Join(outPath, iface), epochTS) - if _, err := os.Stat(destPath); !errors.Is(err, fs.ErrNotExist) { - logger.Debugf("skipping already converted dir %s", destPath) + match, exists := gpfile.FindDirForTimestamp(filepath.Join(outPath, iface), epochTS) + if exists { + logger.Debugf("skipping already converted dir %s", match) continue } } diff --git a/examples/analyze-meta/main.go b/examples/analyze-meta/main.go index 27da3dc6..91eecbfd 100644 --- a/examples/analyze-meta/main.go +++ b/examples/analyze-meta/main.go @@ -7,7 +7,6 @@ import ( "io" "os" "path/filepath" - "strconv" "strings" "text/tabwriter" "time" @@ -20,7 +19,8 @@ import ( ) var ( - pathMetaFile string + pathMetaFile string + rewriteMetadata bool ) func main() { @@ -35,21 +35,38 @@ func main() { logger := logging.Logger() flag.StringVar(&pathMetaFile, "path", "", "Path to meta file") + flag.BoolVar(&rewriteMetadata, "rewrite-metadata", false, "Rewrite metadata on disk") flag.Parse() pathMetaFile = filepath.Clean(pathMetaFile) dirPath := filepath.Dir(pathMetaFile) - timestamp, err := strconv.ParseInt(filepath.Base(dirPath), 10, 64) + + timestamp, suffix, err := gpfile.ExtractTimestampMetadataSuffix(filepath.Base(dirPath)) if err != nil { logger.Fatalf("failed to extract timestamp: %s", err) } baseDirPath := filepath.Dir(filepath.Dir(filepath.Dir(dirPath))) - gpDir := gpfile.NewDir(baseDirPath, timestamp, gpfile.ModeRead) + if rewriteMetadata { + gpDir := gpfile.NewDirWriter(baseDirPath, timestamp) + if err := gpDir.Open(); err != nil { + logger.Fatalf("failed to open GPF dir: %v", err) + } + if err := gpDir.Close(); err != nil { + logger.Fatalf("failed to close GPF dir: %v", err) + } + return + } + + gpDir := gpfile.NewDirReader(baseDirPath, timestamp, suffix) if err := gpDir.Open(); err != nil { logger.Fatalf("failed to open GPF dir: %v", err) } - defer gpDir.Close() + defer func() { + if err := gpDir.Close(); err != nil { + logger.Fatalf("failed to close GPF dir: %v", err) + } + }() for i := types.ColumnIndex(0); i < types.ColIdxCount; i++ { err = PrintMetaTable(gpDir, i, os.Stdout) @@ -59,6 +76,8 @@ func main() { } } +// PrintMetaTable displays a comprehensive table with all metadata information read from the +// metadata file func PrintMetaTable(gpDir *gpfile.GPDir, column types.ColumnIndex, w io.Writer) error { blockMetadata := gpDir.BlockMetadata[column] diff --git a/go.mod b/go.mod index 2a93e275..4ac57fa6 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/els0r/telemetry/logging v0.0.0-20231115132112-88976d9255a2 github.com/els0r/telemetry/metrics v0.0.0-20231115132112-88976d9255a2 github.com/els0r/telemetry/tracing v0.0.0-20231115132112-88976d9255a2 - github.com/fako1024/gotools/bitpack v0.0.0-20240419120819-e06ca9ce9e88 + github.com/fako1024/gotools/bitpack v0.0.0-20240429125115-3a9469ff8610 github.com/fako1024/gotools/concurrency v0.0.0-20230905084243-56aa6a34fb53 github.com/fako1024/httpc v1.0.18 github.com/fako1024/slimcap v1.0.4 diff --git a/go.sum b/go.sum index 1a8b94e8..4c5ef635 100644 --- a/go.sum +++ b/go.sum @@ -29,6 +29,8 @@ github.com/els0r/telemetry/tracing v0.0.0-20231115132112-88976d9255a2 h1:cnctcxr github.com/els0r/telemetry/tracing v0.0.0-20231115132112-88976d9255a2/go.mod h1:8Cl578dIQOTDB6DkPXFCOQ0Z5MQeXwhOzZB/mcCHDgA= 
github.com/fako1024/gotools/bitpack v0.0.0-20240419120819-e06ca9ce9e88 h1:nTZAuKai0dR/olhLiojPnF6dHA749iM/xr2Xg8mtnXo= github.com/fako1024/gotools/bitpack v0.0.0-20240419120819-e06ca9ce9e88/go.mod h1:4wD0uVvDrCUyQd0YxxugT1GYeGtiWVIZ5qhrCWSiH6I= +github.com/fako1024/gotools/bitpack v0.0.0-20240429125115-3a9469ff8610 h1:BIg6mOiuP1dtCD4/6Byakk2FNSX9avA8ckHv9swZ6TQ= +github.com/fako1024/gotools/bitpack v0.0.0-20240429125115-3a9469ff8610/go.mod h1:4wD0uVvDrCUyQd0YxxugT1GYeGtiWVIZ5qhrCWSiH6I= github.com/fako1024/gotools/concurrency v0.0.0-20230905084243-56aa6a34fb53 h1:TUa4dGmK5t0ndG3sWocAN6+DIszheg9iHJSgb4P/dus= github.com/fako1024/gotools/concurrency v0.0.0-20230905084243-56aa6a34fb53/go.mod h1:4mhi2XPVVG6s5zDqL5brEXq2sVZSb4K96zKEJiWrSFs= github.com/fako1024/httpc v1.0.18 h1:8Eqd4dm1CL9v+y4nugFh9kC4DpvK3sA5NP0QzxA72dY= diff --git a/pkg/goDB/DBWorkManager.go b/pkg/goDB/DBWorkManager.go index 12a93d71..9c372289 100644 --- a/pkg/goDB/DBWorkManager.go +++ b/pkg/goDB/DBWorkManager.go @@ -16,6 +16,7 @@ package goDB import ( "context" "fmt" + "io/fs" "os" "path/filepath" "strconv" @@ -108,8 +109,8 @@ func (w *DBWorkManager) CreateWorkerJobs(tfirst int64, tlast int64) (nonempty bo var curDir *gpfile.GPDir workloadBulk := make([]*gpfile.GPDir, 0, WorkBulkSize) - walkFunc := func(numDirs int, dayTimestamp int64) error { - curDir = gpfile.NewDir(w.dbIfaceDir, dayTimestamp, gpfile.ModeRead) + walkFunc := func(numDirs int, dayTimestamp int64, suffix string) error { + curDir = gpfile.NewDirReader(w.dbIfaceDir, dayTimestamp, suffix) // For the first and last item, check out the GPDir metadata for the actual first and // last block timestamp to cover (and adapt variables accordingly) @@ -169,11 +170,11 @@ func (w *DBWorkManager) CreateWorkerJobs(tfirst int64, tlast int64) (nonempty bo return 0 < numDirs, nil } -func skipNonMatching(isDir bool) bool { - return !isDir +func skipNonMatchingDir(entry fs.DirEntry) bool { + return !entry.IsDir() } -type dbWalkFunc func(numDirs int, dayTimestamp int64) error +type dbWalkFunc func(numDirs int, dayTimestamp int64, suffix string) error func (w *DBWorkManager) walkDB(tfirst, tlast int64, fn dbWalkFunc) (numDirs int, err error) { // Get list of years in main directory (ordered by directory name, i.e. 
time) @@ -186,8 +187,8 @@ func (w *DBWorkManager) walkDB(tfirst, tlast int64, fn dbWalkFunc) (numDirs int, var unixFirst, unixLast = time.Unix(tfirst, 0), time.Unix(tlast+DBWriteInterval, 0) for _, year := range yearList { - // Skip obvious non-matching entries - if skipNonMatching(year.IsDir()) { + // Skip obvious non-matching entries (anything not a directory) + if skipNonMatchingDir(year) { continue } @@ -206,8 +207,9 @@ func (w *DBWorkManager) walkDB(tfirst, tlast int64, fn dbWalkFunc) (numDirs int, return numDirs, err } for _, month := range monthList { - // Skip obvious non-matching entries - if skipNonMatching(month.IsDir()) { + + // Skip obvious non-matching entries (anything not a directory) + if skipNonMatchingDir(month) { continue } @@ -227,19 +229,24 @@ func (w *DBWorkManager) walkDB(tfirst, tlast int64, fn dbWalkFunc) (numDirs int, return numDirs, err } - for _, file := range dirList { - if skipNonMatching(file.IsDir()) { + for _, timestamp := range dirList { + + // Skip obvious non-matching entries (anything not a directory) + if skipNonMatchingDir(timestamp) { continue } - dayTimestamp, err := strconv.ParseInt(file.Name(), 10, 64) + + // Extract the timestamp (and potentially metadata suffix) from the directory name + dayTimestamp, suffix, err := gpfile.ExtractTimestampMetadataSuffix(timestamp.Name()) if err != nil { - return numDirs, fmt.Errorf("failed to parse epoch timestamp from directory `%s`: %w", file.Name(), err) + return numDirs, fmt.Errorf("failed to parse timestamp / suffix from directory `%s`: %w", timestamp.Name(), err) } // check if the directory is within time frame of interest if tfirst < dayTimestamp+gpfile.EpochDay && dayTimestamp < tlast+DBWriteInterval { + // actual processing upon a match - err := fn(numDirs, dayTimestamp) + err := fn(numDirs, dayTimestamp, suffix) if err != nil { return numDirs, err } @@ -267,17 +274,23 @@ func (w *DBWorkManager) ReadMetadata(tfirst int64, tlast int64) (*InterfaceMetad // make sure to start with zero workloads as the number of assigned // workloads depends on how many directories have to be read - var curDir *gpfile.GPDir + var ( + curDir *gpfile.GPDir + currentTimestamp int64 + currentSuffix string + ) - var currentTimestamp int64 + walkFunc := func(numDirs int, dayTimestamp int64, suffix string) error { - walkFunc := func(numDirs int, dayTimestamp int64) error { - currentTimestamp = dayTimestamp - curDir = gpfile.NewDir(w.dbIfaceDir, dayTimestamp, gpfile.ModeRead, gpFileOptions...) + var err error + currentTimestamp, currentSuffix = dayTimestamp, suffix + curDir = gpfile.NewDirReader(w.dbIfaceDir, dayTimestamp, suffix, gpFileOptions...) - err := curDir.Open() - if err != nil { - return fmt.Errorf("failed to open first GPDir %s to ascertain query block timing: %w", curDir.Path(), err) + if curDir.Metadata == nil { + err = curDir.Open() + if err != nil { + return fmt.Errorf("failed to open GPDir %s to ascertain query block timing: %w", curDir.Path(), err) + } } // do the metadata compuation based on the metadata @@ -286,6 +299,14 @@ func (w *DBWorkManager) ReadMetadata(tfirst int64, tlast int64) (*InterfaceMetad // compute the metadata for the first day. 
If a "first" time argument is given, // the partial day has to be computed if numDirs == 0 { + + if !curDir.IsOpen() { + err = curDir.Open() + if err != nil { + return fmt.Errorf("failed to open GPDir %s to ascertain query block timing: %w", curDir.Path(), err) + } + } + dirFirst, _ := curDir.TimeRange() if tfirst >= dirFirst { // subtract all entries that are smaller than w.tFirstCovered because they were added in the day loop @@ -313,8 +334,10 @@ func (w *DBWorkManager) ReadMetadata(tfirst int64, tlast int64) (*InterfaceMetad w.tFirstCovered = dirFirst } } - if err := curDir.Close(); err != nil { - return fmt.Errorf("failed to close first GPDir %s after ascertaining query block timing: %w", curDir.Path(), err) + if curDir.IsOpen() { + if err := curDir.Close(); err != nil { + return fmt.Errorf("failed to close first GPDir %s after ascertaining query block timing: %w", curDir.Path(), err) + } } return nil } @@ -327,7 +350,7 @@ func (w *DBWorkManager) ReadMetadata(tfirst int64, tlast int64) (*InterfaceMetad // compute the metadata for the last block. This will be partial if the last timestamp is smaller than the last // block captured for the day if curDir != nil { - curDir = gpfile.NewDir(w.dbIfaceDir, currentTimestamp, gpfile.ModeRead, gpFileOptions...) + curDir = gpfile.NewDirReader(w.dbIfaceDir, currentTimestamp, currentSuffix, gpFileOptions...) if err := curDir.Open(); err != nil { return nil, fmt.Errorf("failed to open last GPDir %s to ascertain query block timing: %w", curDir.Path(), err) diff --git a/pkg/goDB/db_writer.go b/pkg/goDB/db_writer.go index c1dd4db0..36ff2aa7 100644 --- a/pkg/goDB/db_writer.go +++ b/pkg/goDB/db_writer.go @@ -66,7 +66,7 @@ func (w *DBWriter) Write(flowmap *hashmap.AggFlowMap, captureStats capturetypes. err error ) - dir := gpfile.NewDir(filepath.Join(w.dbpath, w.iface), timestamp, gpfile.ModeWrite, gpfile.WithPermissions(w.permissions), gpfile.WithEncoderTypeLevel(w.encoderType, w.encoderLevel)) + dir := gpfile.NewDirWriter(filepath.Join(w.dbpath, w.iface), timestamp, gpfile.WithPermissions(w.permissions), gpfile.WithEncoderTypeLevel(w.encoderType, w.encoderLevel)) if err = dir.Open(); err != nil { return fmt.Errorf("failed to create / open daily directory: %w", err) } @@ -97,7 +97,7 @@ func (w *DBWriter) WriteBulk(workloads []BulkWorkload, dirTimestamp int64) (err update gpfile.Stats ) - dir := gpfile.NewDir(filepath.Join(w.dbpath, w.iface), dirTimestamp, gpfile.ModeWrite, gpfile.WithPermissions(w.permissions), gpfile.WithEncoderTypeLevel(w.encoderType, w.encoderLevel)) + dir := gpfile.NewDirWriter(filepath.Join(w.dbpath, w.iface), dirTimestamp, gpfile.WithPermissions(w.permissions), gpfile.WithEncoderTypeLevel(w.encoderType, w.encoderLevel)) if err = dir.Open(); err != nil { return fmt.Errorf("failed to create / open daily directory: %w", err) } diff --git a/pkg/goDB/engine/testdb/eth0/2016/02/1456444800/.blockmeta b/pkg/goDB/engine/testdb/eth0/2016/02/1456444800_mm1-0-eE3-SvUZv-SF9Du1-CFM4-Ppa6/.blockmeta similarity index 100% rename from pkg/goDB/engine/testdb/eth0/2016/02/1456444800/.blockmeta rename to pkg/goDB/engine/testdb/eth0/2016/02/1456444800_mm1-0-eE3-SvUZv-SF9Du1-CFM4-Ppa6/.blockmeta diff --git a/pkg/goDB/engine/testdb/eth0/2016/02/1456444800/bytes_rcvd.gpf b/pkg/goDB/engine/testdb/eth0/2016/02/1456444800_mm1-0-eE3-SvUZv-SF9Du1-CFM4-Ppa6/bytes_rcvd.gpf similarity index 100% rename from pkg/goDB/engine/testdb/eth0/2016/02/1456444800/bytes_rcvd.gpf rename to 
pkg/goDB/engine/testdb/eth0/2016/02/1456444800_mm1-0-eE3-SvUZv-SF9Du1-CFM4-Ppa6/bytes_rcvd.gpf diff --git a/pkg/goDB/engine/testdb/eth0/2016/02/1456444800/bytes_sent.gpf b/pkg/goDB/engine/testdb/eth0/2016/02/1456444800_mm1-0-eE3-SvUZv-SF9Du1-CFM4-Ppa6/bytes_sent.gpf similarity index 100% rename from pkg/goDB/engine/testdb/eth0/2016/02/1456444800/bytes_sent.gpf rename to pkg/goDB/engine/testdb/eth0/2016/02/1456444800_mm1-0-eE3-SvUZv-SF9Du1-CFM4-Ppa6/bytes_sent.gpf diff --git a/pkg/goDB/engine/testdb/eth0/2016/02/1456444800/dip.gpf b/pkg/goDB/engine/testdb/eth0/2016/02/1456444800_mm1-0-eE3-SvUZv-SF9Du1-CFM4-Ppa6/dip.gpf similarity index 100% rename from pkg/goDB/engine/testdb/eth0/2016/02/1456444800/dip.gpf rename to pkg/goDB/engine/testdb/eth0/2016/02/1456444800_mm1-0-eE3-SvUZv-SF9Du1-CFM4-Ppa6/dip.gpf diff --git a/pkg/goDB/engine/testdb/eth0/2016/02/1456444800/dport.gpf b/pkg/goDB/engine/testdb/eth0/2016/02/1456444800_mm1-0-eE3-SvUZv-SF9Du1-CFM4-Ppa6/dport.gpf similarity index 100% rename from pkg/goDB/engine/testdb/eth0/2016/02/1456444800/dport.gpf rename to pkg/goDB/engine/testdb/eth0/2016/02/1456444800_mm1-0-eE3-SvUZv-SF9Du1-CFM4-Ppa6/dport.gpf diff --git a/pkg/goDB/engine/testdb/eth0/2016/02/1456444800/pkts_rcvd.gpf b/pkg/goDB/engine/testdb/eth0/2016/02/1456444800_mm1-0-eE3-SvUZv-SF9Du1-CFM4-Ppa6/pkts_rcvd.gpf similarity index 100% rename from pkg/goDB/engine/testdb/eth0/2016/02/1456444800/pkts_rcvd.gpf rename to pkg/goDB/engine/testdb/eth0/2016/02/1456444800_mm1-0-eE3-SvUZv-SF9Du1-CFM4-Ppa6/pkts_rcvd.gpf diff --git a/pkg/goDB/engine/testdb/eth0/2016/02/1456444800/pkts_sent.gpf b/pkg/goDB/engine/testdb/eth0/2016/02/1456444800_mm1-0-eE3-SvUZv-SF9Du1-CFM4-Ppa6/pkts_sent.gpf similarity index 100% rename from pkg/goDB/engine/testdb/eth0/2016/02/1456444800/pkts_sent.gpf rename to pkg/goDB/engine/testdb/eth0/2016/02/1456444800_mm1-0-eE3-SvUZv-SF9Du1-CFM4-Ppa6/pkts_sent.gpf diff --git a/pkg/goDB/engine/testdb/eth0/2016/02/1456444800/proto.gpf b/pkg/goDB/engine/testdb/eth0/2016/02/1456444800_mm1-0-eE3-SvUZv-SF9Du1-CFM4-Ppa6/proto.gpf similarity index 100% rename from pkg/goDB/engine/testdb/eth0/2016/02/1456444800/proto.gpf rename to pkg/goDB/engine/testdb/eth0/2016/02/1456444800_mm1-0-eE3-SvUZv-SF9Du1-CFM4-Ppa6/proto.gpf diff --git a/pkg/goDB/engine/testdb/eth0/2016/02/1456444800/sip.gpf b/pkg/goDB/engine/testdb/eth0/2016/02/1456444800_mm1-0-eE3-SvUZv-SF9Du1-CFM4-Ppa6/sip.gpf similarity index 100% rename from pkg/goDB/engine/testdb/eth0/2016/02/1456444800/sip.gpf rename to pkg/goDB/engine/testdb/eth0/2016/02/1456444800_mm1-0-eE3-SvUZv-SF9Du1-CFM4-Ppa6/sip.gpf diff --git a/pkg/goDB/engine/testdb/eth1/2016/02/1456444800/.blockmeta b/pkg/goDB/engine/testdb/eth1/2016/02/1456444800_oX-1-aVB1-8PILs4-AK9rM2-0Gt8-05he/.blockmeta similarity index 100% rename from pkg/goDB/engine/testdb/eth1/2016/02/1456444800/.blockmeta rename to pkg/goDB/engine/testdb/eth1/2016/02/1456444800_oX-1-aVB1-8PILs4-AK9rM2-0Gt8-05he/.blockmeta diff --git a/pkg/goDB/engine/testdb/eth1/2016/02/1456444800/bytes_rcvd.gpf b/pkg/goDB/engine/testdb/eth1/2016/02/1456444800_oX-1-aVB1-8PILs4-AK9rM2-0Gt8-05he/bytes_rcvd.gpf similarity index 100% rename from pkg/goDB/engine/testdb/eth1/2016/02/1456444800/bytes_rcvd.gpf rename to pkg/goDB/engine/testdb/eth1/2016/02/1456444800_oX-1-aVB1-8PILs4-AK9rM2-0Gt8-05he/bytes_rcvd.gpf diff --git a/pkg/goDB/engine/testdb/eth1/2016/02/1456444800/bytes_sent.gpf b/pkg/goDB/engine/testdb/eth1/2016/02/1456444800_oX-1-aVB1-8PILs4-AK9rM2-0Gt8-05he/bytes_sent.gpf similarity index 100% rename from 
pkg/goDB/engine/testdb/eth1/2016/02/1456444800/bytes_sent.gpf rename to pkg/goDB/engine/testdb/eth1/2016/02/1456444800_oX-1-aVB1-8PILs4-AK9rM2-0Gt8-05he/bytes_sent.gpf diff --git a/pkg/goDB/engine/testdb/eth1/2016/02/1456444800/dip.gpf b/pkg/goDB/engine/testdb/eth1/2016/02/1456444800_oX-1-aVB1-8PILs4-AK9rM2-0Gt8-05he/dip.gpf similarity index 100% rename from pkg/goDB/engine/testdb/eth1/2016/02/1456444800/dip.gpf rename to pkg/goDB/engine/testdb/eth1/2016/02/1456444800_oX-1-aVB1-8PILs4-AK9rM2-0Gt8-05he/dip.gpf diff --git a/pkg/goDB/engine/testdb/eth1/2016/02/1456444800/dport.gpf b/pkg/goDB/engine/testdb/eth1/2016/02/1456444800_oX-1-aVB1-8PILs4-AK9rM2-0Gt8-05he/dport.gpf similarity index 100% rename from pkg/goDB/engine/testdb/eth1/2016/02/1456444800/dport.gpf rename to pkg/goDB/engine/testdb/eth1/2016/02/1456444800_oX-1-aVB1-8PILs4-AK9rM2-0Gt8-05he/dport.gpf diff --git a/pkg/goDB/engine/testdb/eth1/2016/02/1456444800/pkts_rcvd.gpf b/pkg/goDB/engine/testdb/eth1/2016/02/1456444800_oX-1-aVB1-8PILs4-AK9rM2-0Gt8-05he/pkts_rcvd.gpf similarity index 100% rename from pkg/goDB/engine/testdb/eth1/2016/02/1456444800/pkts_rcvd.gpf rename to pkg/goDB/engine/testdb/eth1/2016/02/1456444800_oX-1-aVB1-8PILs4-AK9rM2-0Gt8-05he/pkts_rcvd.gpf diff --git a/pkg/goDB/engine/testdb/eth1/2016/02/1456444800/pkts_sent.gpf b/pkg/goDB/engine/testdb/eth1/2016/02/1456444800_oX-1-aVB1-8PILs4-AK9rM2-0Gt8-05he/pkts_sent.gpf similarity index 100% rename from pkg/goDB/engine/testdb/eth1/2016/02/1456444800/pkts_sent.gpf rename to pkg/goDB/engine/testdb/eth1/2016/02/1456444800_oX-1-aVB1-8PILs4-AK9rM2-0Gt8-05he/pkts_sent.gpf diff --git a/pkg/goDB/engine/testdb/eth1/2016/02/1456444800/proto.gpf b/pkg/goDB/engine/testdb/eth1/2016/02/1456444800_oX-1-aVB1-8PILs4-AK9rM2-0Gt8-05he/proto.gpf similarity index 100% rename from pkg/goDB/engine/testdb/eth1/2016/02/1456444800/proto.gpf rename to pkg/goDB/engine/testdb/eth1/2016/02/1456444800_oX-1-aVB1-8PILs4-AK9rM2-0Gt8-05he/proto.gpf diff --git a/pkg/goDB/engine/testdb/eth1/2016/02/1456444800/sip.gpf b/pkg/goDB/engine/testdb/eth1/2016/02/1456444800_oX-1-aVB1-8PILs4-AK9rM2-0Gt8-05he/sip.gpf similarity index 100% rename from pkg/goDB/engine/testdb/eth1/2016/02/1456444800/sip.gpf rename to pkg/goDB/engine/testdb/eth1/2016/02/1456444800_oX-1-aVB1-8PILs4-AK9rM2-0Gt8-05he/sip.gpf diff --git a/pkg/goDB/engine/testdb/eth2/2016/02/1456444800/.blockmeta b/pkg/goDB/engine/testdb/eth2/2016/02/1456444800_w3-0-0-2mW1-KQ81-DA-Xo/.blockmeta similarity index 100% rename from pkg/goDB/engine/testdb/eth2/2016/02/1456444800/.blockmeta rename to pkg/goDB/engine/testdb/eth2/2016/02/1456444800_w3-0-0-2mW1-KQ81-DA-Xo/.blockmeta diff --git a/pkg/goDB/engine/testdb/eth2/2016/02/1456444800/bytes_rcvd.gpf b/pkg/goDB/engine/testdb/eth2/2016/02/1456444800_w3-0-0-2mW1-KQ81-DA-Xo/bytes_rcvd.gpf similarity index 100% rename from pkg/goDB/engine/testdb/eth2/2016/02/1456444800/bytes_rcvd.gpf rename to pkg/goDB/engine/testdb/eth2/2016/02/1456444800_w3-0-0-2mW1-KQ81-DA-Xo/bytes_rcvd.gpf diff --git a/pkg/goDB/engine/testdb/eth2/2016/02/1456444800/bytes_sent.gpf b/pkg/goDB/engine/testdb/eth2/2016/02/1456444800_w3-0-0-2mW1-KQ81-DA-Xo/bytes_sent.gpf similarity index 100% rename from pkg/goDB/engine/testdb/eth2/2016/02/1456444800/bytes_sent.gpf rename to pkg/goDB/engine/testdb/eth2/2016/02/1456444800_w3-0-0-2mW1-KQ81-DA-Xo/bytes_sent.gpf diff --git a/pkg/goDB/engine/testdb/eth2/2016/02/1456444800/dip.gpf b/pkg/goDB/engine/testdb/eth2/2016/02/1456444800_w3-0-0-2mW1-KQ81-DA-Xo/dip.gpf similarity index 100% rename from 
pkg/goDB/engine/testdb/eth2/2016/02/1456444800/dip.gpf rename to pkg/goDB/engine/testdb/eth2/2016/02/1456444800_w3-0-0-2mW1-KQ81-DA-Xo/dip.gpf diff --git a/pkg/goDB/engine/testdb/eth2/2016/02/1456444800/dport.gpf b/pkg/goDB/engine/testdb/eth2/2016/02/1456444800_w3-0-0-2mW1-KQ81-DA-Xo/dport.gpf similarity index 100% rename from pkg/goDB/engine/testdb/eth2/2016/02/1456444800/dport.gpf rename to pkg/goDB/engine/testdb/eth2/2016/02/1456444800_w3-0-0-2mW1-KQ81-DA-Xo/dport.gpf diff --git a/pkg/goDB/engine/testdb/eth2/2016/02/1456444800/pkts_rcvd.gpf b/pkg/goDB/engine/testdb/eth2/2016/02/1456444800_w3-0-0-2mW1-KQ81-DA-Xo/pkts_rcvd.gpf similarity index 100% rename from pkg/goDB/engine/testdb/eth2/2016/02/1456444800/pkts_rcvd.gpf rename to pkg/goDB/engine/testdb/eth2/2016/02/1456444800_w3-0-0-2mW1-KQ81-DA-Xo/pkts_rcvd.gpf diff --git a/pkg/goDB/engine/testdb/eth2/2016/02/1456444800/pkts_sent.gpf b/pkg/goDB/engine/testdb/eth2/2016/02/1456444800_w3-0-0-2mW1-KQ81-DA-Xo/pkts_sent.gpf similarity index 100% rename from pkg/goDB/engine/testdb/eth2/2016/02/1456444800/pkts_sent.gpf rename to pkg/goDB/engine/testdb/eth2/2016/02/1456444800_w3-0-0-2mW1-KQ81-DA-Xo/pkts_sent.gpf diff --git a/pkg/goDB/engine/testdb/eth2/2016/02/1456444800/proto.gpf b/pkg/goDB/engine/testdb/eth2/2016/02/1456444800_w3-0-0-2mW1-KQ81-DA-Xo/proto.gpf similarity index 100% rename from pkg/goDB/engine/testdb/eth2/2016/02/1456444800/proto.gpf rename to pkg/goDB/engine/testdb/eth2/2016/02/1456444800_w3-0-0-2mW1-KQ81-DA-Xo/proto.gpf diff --git a/pkg/goDB/engine/testdb/eth2/2016/02/1456444800/sip.gpf b/pkg/goDB/engine/testdb/eth2/2016/02/1456444800_w3-0-0-2mW1-KQ81-DA-Xo/sip.gpf similarity index 100% rename from pkg/goDB/engine/testdb/eth2/2016/02/1456444800/sip.gpf rename to pkg/goDB/engine/testdb/eth2/2016/02/1456444800_w3-0-0-2mW1-KQ81-DA-Xo/sip.gpf diff --git a/pkg/goDB/engine/testdb/t_c1_fwde/2016/02/1456444800/.blockmeta b/pkg/goDB/engine/testdb/t_c1_fwde/2016/02/1456444800_Ns-0-0-fXANh-hb5ap-Eum1-o3w1/.blockmeta similarity index 100% rename from pkg/goDB/engine/testdb/t_c1_fwde/2016/02/1456444800/.blockmeta rename to pkg/goDB/engine/testdb/t_c1_fwde/2016/02/1456444800_Ns-0-0-fXANh-hb5ap-Eum1-o3w1/.blockmeta diff --git a/pkg/goDB/engine/testdb/t_c1_fwde/2016/02/1456444800/bytes_rcvd.gpf b/pkg/goDB/engine/testdb/t_c1_fwde/2016/02/1456444800_Ns-0-0-fXANh-hb5ap-Eum1-o3w1/bytes_rcvd.gpf similarity index 100% rename from pkg/goDB/engine/testdb/t_c1_fwde/2016/02/1456444800/bytes_rcvd.gpf rename to pkg/goDB/engine/testdb/t_c1_fwde/2016/02/1456444800_Ns-0-0-fXANh-hb5ap-Eum1-o3w1/bytes_rcvd.gpf diff --git a/pkg/goDB/engine/testdb/t_c1_fwde/2016/02/1456444800/bytes_sent.gpf b/pkg/goDB/engine/testdb/t_c1_fwde/2016/02/1456444800_Ns-0-0-fXANh-hb5ap-Eum1-o3w1/bytes_sent.gpf similarity index 100% rename from pkg/goDB/engine/testdb/t_c1_fwde/2016/02/1456444800/bytes_sent.gpf rename to pkg/goDB/engine/testdb/t_c1_fwde/2016/02/1456444800_Ns-0-0-fXANh-hb5ap-Eum1-o3w1/bytes_sent.gpf diff --git a/pkg/goDB/engine/testdb/t_c1_fwde/2016/02/1456444800/dip.gpf b/pkg/goDB/engine/testdb/t_c1_fwde/2016/02/1456444800_Ns-0-0-fXANh-hb5ap-Eum1-o3w1/dip.gpf similarity index 100% rename from pkg/goDB/engine/testdb/t_c1_fwde/2016/02/1456444800/dip.gpf rename to pkg/goDB/engine/testdb/t_c1_fwde/2016/02/1456444800_Ns-0-0-fXANh-hb5ap-Eum1-o3w1/dip.gpf diff --git a/pkg/goDB/engine/testdb/t_c1_fwde/2016/02/1456444800/dport.gpf b/pkg/goDB/engine/testdb/t_c1_fwde/2016/02/1456444800_Ns-0-0-fXANh-hb5ap-Eum1-o3w1/dport.gpf similarity index 100% rename from 
pkg/goDB/engine/testdb/t_c1_fwde/2016/02/1456444800/dport.gpf rename to pkg/goDB/engine/testdb/t_c1_fwde/2016/02/1456444800_Ns-0-0-fXANh-hb5ap-Eum1-o3w1/dport.gpf diff --git a/pkg/goDB/engine/testdb/t_c1_fwde/2016/02/1456444800/pkts_rcvd.gpf b/pkg/goDB/engine/testdb/t_c1_fwde/2016/02/1456444800_Ns-0-0-fXANh-hb5ap-Eum1-o3w1/pkts_rcvd.gpf similarity index 100% rename from pkg/goDB/engine/testdb/t_c1_fwde/2016/02/1456444800/pkts_rcvd.gpf rename to pkg/goDB/engine/testdb/t_c1_fwde/2016/02/1456444800_Ns-0-0-fXANh-hb5ap-Eum1-o3w1/pkts_rcvd.gpf diff --git a/pkg/goDB/engine/testdb/t_c1_fwde/2016/02/1456444800/pkts_sent.gpf b/pkg/goDB/engine/testdb/t_c1_fwde/2016/02/1456444800_Ns-0-0-fXANh-hb5ap-Eum1-o3w1/pkts_sent.gpf similarity index 100% rename from pkg/goDB/engine/testdb/t_c1_fwde/2016/02/1456444800/pkts_sent.gpf rename to pkg/goDB/engine/testdb/t_c1_fwde/2016/02/1456444800_Ns-0-0-fXANh-hb5ap-Eum1-o3w1/pkts_sent.gpf diff --git a/pkg/goDB/engine/testdb/t_c1_fwde/2016/02/1456444800/proto.gpf b/pkg/goDB/engine/testdb/t_c1_fwde/2016/02/1456444800_Ns-0-0-fXANh-hb5ap-Eum1-o3w1/proto.gpf similarity index 100% rename from pkg/goDB/engine/testdb/t_c1_fwde/2016/02/1456444800/proto.gpf rename to pkg/goDB/engine/testdb/t_c1_fwde/2016/02/1456444800_Ns-0-0-fXANh-hb5ap-Eum1-o3w1/proto.gpf diff --git a/pkg/goDB/engine/testdb/t_c1_fwde/2016/02/1456444800/sip.gpf b/pkg/goDB/engine/testdb/t_c1_fwde/2016/02/1456444800_Ns-0-0-fXANh-hb5ap-Eum1-o3w1/sip.gpf similarity index 100% rename from pkg/goDB/engine/testdb/t_c1_fwde/2016/02/1456444800/sip.gpf rename to pkg/goDB/engine/testdb/t_c1_fwde/2016/02/1456444800_Ns-0-0-fXANh-hb5ap-Eum1-o3w1/sip.gpf diff --git a/pkg/goDB/engine/testdb/t_c1_fwde1/2016/02/1456444800/.blockmeta b/pkg/goDB/engine/testdb/t_c1_fwde1/2016/02/1456444800_a6-0-0-I6k54-JwjJ-Ltg-nij/.blockmeta similarity index 100% rename from pkg/goDB/engine/testdb/t_c1_fwde1/2016/02/1456444800/.blockmeta rename to pkg/goDB/engine/testdb/t_c1_fwde1/2016/02/1456444800_a6-0-0-I6k54-JwjJ-Ltg-nij/.blockmeta diff --git a/pkg/goDB/engine/testdb/t_c1_fwde1/2016/02/1456444800/bytes_rcvd.gpf b/pkg/goDB/engine/testdb/t_c1_fwde1/2016/02/1456444800_a6-0-0-I6k54-JwjJ-Ltg-nij/bytes_rcvd.gpf similarity index 100% rename from pkg/goDB/engine/testdb/t_c1_fwde1/2016/02/1456444800/bytes_rcvd.gpf rename to pkg/goDB/engine/testdb/t_c1_fwde1/2016/02/1456444800_a6-0-0-I6k54-JwjJ-Ltg-nij/bytes_rcvd.gpf diff --git a/pkg/goDB/engine/testdb/t_c1_fwde1/2016/02/1456444800/bytes_sent.gpf b/pkg/goDB/engine/testdb/t_c1_fwde1/2016/02/1456444800_a6-0-0-I6k54-JwjJ-Ltg-nij/bytes_sent.gpf similarity index 100% rename from pkg/goDB/engine/testdb/t_c1_fwde1/2016/02/1456444800/bytes_sent.gpf rename to pkg/goDB/engine/testdb/t_c1_fwde1/2016/02/1456444800_a6-0-0-I6k54-JwjJ-Ltg-nij/bytes_sent.gpf diff --git a/pkg/goDB/engine/testdb/t_c1_fwde1/2016/02/1456444800/dip.gpf b/pkg/goDB/engine/testdb/t_c1_fwde1/2016/02/1456444800_a6-0-0-I6k54-JwjJ-Ltg-nij/dip.gpf similarity index 100% rename from pkg/goDB/engine/testdb/t_c1_fwde1/2016/02/1456444800/dip.gpf rename to pkg/goDB/engine/testdb/t_c1_fwde1/2016/02/1456444800_a6-0-0-I6k54-JwjJ-Ltg-nij/dip.gpf diff --git a/pkg/goDB/engine/testdb/t_c1_fwde1/2016/02/1456444800/dport.gpf b/pkg/goDB/engine/testdb/t_c1_fwde1/2016/02/1456444800_a6-0-0-I6k54-JwjJ-Ltg-nij/dport.gpf similarity index 100% rename from pkg/goDB/engine/testdb/t_c1_fwde1/2016/02/1456444800/dport.gpf rename to pkg/goDB/engine/testdb/t_c1_fwde1/2016/02/1456444800_a6-0-0-I6k54-JwjJ-Ltg-nij/dport.gpf diff --git 
a/pkg/goDB/engine/testdb/t_c1_fwde1/2016/02/1456444800/pkts_rcvd.gpf b/pkg/goDB/engine/testdb/t_c1_fwde1/2016/02/1456444800_a6-0-0-I6k54-JwjJ-Ltg-nij/pkts_rcvd.gpf similarity index 100% rename from pkg/goDB/engine/testdb/t_c1_fwde1/2016/02/1456444800/pkts_rcvd.gpf rename to pkg/goDB/engine/testdb/t_c1_fwde1/2016/02/1456444800_a6-0-0-I6k54-JwjJ-Ltg-nij/pkts_rcvd.gpf diff --git a/pkg/goDB/engine/testdb/t_c1_fwde1/2016/02/1456444800/pkts_sent.gpf b/pkg/goDB/engine/testdb/t_c1_fwde1/2016/02/1456444800_a6-0-0-I6k54-JwjJ-Ltg-nij/pkts_sent.gpf similarity index 100% rename from pkg/goDB/engine/testdb/t_c1_fwde1/2016/02/1456444800/pkts_sent.gpf rename to pkg/goDB/engine/testdb/t_c1_fwde1/2016/02/1456444800_a6-0-0-I6k54-JwjJ-Ltg-nij/pkts_sent.gpf diff --git a/pkg/goDB/engine/testdb/t_c1_fwde1/2016/02/1456444800/proto.gpf b/pkg/goDB/engine/testdb/t_c1_fwde1/2016/02/1456444800_a6-0-0-I6k54-JwjJ-Ltg-nij/proto.gpf similarity index 100% rename from pkg/goDB/engine/testdb/t_c1_fwde1/2016/02/1456444800/proto.gpf rename to pkg/goDB/engine/testdb/t_c1_fwde1/2016/02/1456444800_a6-0-0-I6k54-JwjJ-Ltg-nij/proto.gpf diff --git a/pkg/goDB/engine/testdb/t_c1_fwde1/2016/02/1456444800/sip.gpf b/pkg/goDB/engine/testdb/t_c1_fwde1/2016/02/1456444800_a6-0-0-I6k54-JwjJ-Ltg-nij/sip.gpf similarity index 100% rename from pkg/goDB/engine/testdb/t_c1_fwde1/2016/02/1456444800/sip.gpf rename to pkg/goDB/engine/testdb/t_c1_fwde1/2016/02/1456444800_a6-0-0-I6k54-JwjJ-Ltg-nij/sip.gpf diff --git a/pkg/goDB/engine/testdb/tun_3g_c1_fw1/2016/02/1456444800/.blockmeta b/pkg/goDB/engine/testdb/tun_3g_c1_fw1/2016/02/1456444800_v1-0-0-yr7-j86-g9-P5/.blockmeta similarity index 100% rename from pkg/goDB/engine/testdb/tun_3g_c1_fw1/2016/02/1456444800/.blockmeta rename to pkg/goDB/engine/testdb/tun_3g_c1_fw1/2016/02/1456444800_v1-0-0-yr7-j86-g9-P5/.blockmeta diff --git a/pkg/goDB/engine/testdb/tun_3g_c1_fw1/2016/02/1456444800/bytes_rcvd.gpf b/pkg/goDB/engine/testdb/tun_3g_c1_fw1/2016/02/1456444800_v1-0-0-yr7-j86-g9-P5/bytes_rcvd.gpf similarity index 100% rename from pkg/goDB/engine/testdb/tun_3g_c1_fw1/2016/02/1456444800/bytes_rcvd.gpf rename to pkg/goDB/engine/testdb/tun_3g_c1_fw1/2016/02/1456444800_v1-0-0-yr7-j86-g9-P5/bytes_rcvd.gpf diff --git a/pkg/goDB/engine/testdb/tun_3g_c1_fw1/2016/02/1456444800/bytes_sent.gpf b/pkg/goDB/engine/testdb/tun_3g_c1_fw1/2016/02/1456444800_v1-0-0-yr7-j86-g9-P5/bytes_sent.gpf similarity index 100% rename from pkg/goDB/engine/testdb/tun_3g_c1_fw1/2016/02/1456444800/bytes_sent.gpf rename to pkg/goDB/engine/testdb/tun_3g_c1_fw1/2016/02/1456444800_v1-0-0-yr7-j86-g9-P5/bytes_sent.gpf diff --git a/pkg/goDB/engine/testdb/tun_3g_c1_fw1/2016/02/1456444800/dip.gpf b/pkg/goDB/engine/testdb/tun_3g_c1_fw1/2016/02/1456444800_v1-0-0-yr7-j86-g9-P5/dip.gpf similarity index 100% rename from pkg/goDB/engine/testdb/tun_3g_c1_fw1/2016/02/1456444800/dip.gpf rename to pkg/goDB/engine/testdb/tun_3g_c1_fw1/2016/02/1456444800_v1-0-0-yr7-j86-g9-P5/dip.gpf diff --git a/pkg/goDB/engine/testdb/tun_3g_c1_fw1/2016/02/1456444800/dport.gpf b/pkg/goDB/engine/testdb/tun_3g_c1_fw1/2016/02/1456444800_v1-0-0-yr7-j86-g9-P5/dport.gpf similarity index 100% rename from pkg/goDB/engine/testdb/tun_3g_c1_fw1/2016/02/1456444800/dport.gpf rename to pkg/goDB/engine/testdb/tun_3g_c1_fw1/2016/02/1456444800_v1-0-0-yr7-j86-g9-P5/dport.gpf diff --git a/pkg/goDB/engine/testdb/tun_3g_c1_fw1/2016/02/1456444800/pkts_rcvd.gpf b/pkg/goDB/engine/testdb/tun_3g_c1_fw1/2016/02/1456444800_v1-0-0-yr7-j86-g9-P5/pkts_rcvd.gpf similarity index 100% rename from 
pkg/goDB/engine/testdb/tun_3g_c1_fw1/2016/02/1456444800/pkts_rcvd.gpf rename to pkg/goDB/engine/testdb/tun_3g_c1_fw1/2016/02/1456444800_v1-0-0-yr7-j86-g9-P5/pkts_rcvd.gpf diff --git a/pkg/goDB/engine/testdb/tun_3g_c1_fw1/2016/02/1456444800/pkts_sent.gpf b/pkg/goDB/engine/testdb/tun_3g_c1_fw1/2016/02/1456444800_v1-0-0-yr7-j86-g9-P5/pkts_sent.gpf similarity index 100% rename from pkg/goDB/engine/testdb/tun_3g_c1_fw1/2016/02/1456444800/pkts_sent.gpf rename to pkg/goDB/engine/testdb/tun_3g_c1_fw1/2016/02/1456444800_v1-0-0-yr7-j86-g9-P5/pkts_sent.gpf diff --git a/pkg/goDB/engine/testdb/tun_3g_c1_fw1/2016/02/1456444800/proto.gpf b/pkg/goDB/engine/testdb/tun_3g_c1_fw1/2016/02/1456444800_v1-0-0-yr7-j86-g9-P5/proto.gpf similarity index 100% rename from pkg/goDB/engine/testdb/tun_3g_c1_fw1/2016/02/1456444800/proto.gpf rename to pkg/goDB/engine/testdb/tun_3g_c1_fw1/2016/02/1456444800_v1-0-0-yr7-j86-g9-P5/proto.gpf diff --git a/pkg/goDB/engine/testdb/tun_3g_c1_fw1/2016/02/1456444800/sip.gpf b/pkg/goDB/engine/testdb/tun_3g_c1_fw1/2016/02/1456444800_v1-0-0-yr7-j86-g9-P5/sip.gpf similarity index 100% rename from pkg/goDB/engine/testdb/tun_3g_c1_fw1/2016/02/1456444800/sip.gpf rename to pkg/goDB/engine/testdb/tun_3g_c1_fw1/2016/02/1456444800_v1-0-0-yr7-j86-g9-P5/sip.gpf diff --git a/pkg/goDB/engine/testdb/tun_3g_c1_fwde/2016/02/1456444800/.blockmeta b/pkg/goDB/engine/testdb/tun_3g_c1_fwde/2016/02/1456444800_v1-0-0-cg6-bg5-18-T4/.blockmeta similarity index 100% rename from pkg/goDB/engine/testdb/tun_3g_c1_fwde/2016/02/1456444800/.blockmeta rename to pkg/goDB/engine/testdb/tun_3g_c1_fwde/2016/02/1456444800_v1-0-0-cg6-bg5-18-T4/.blockmeta diff --git a/pkg/goDB/engine/testdb/tun_3g_c1_fwde/2016/02/1456444800/bytes_rcvd.gpf b/pkg/goDB/engine/testdb/tun_3g_c1_fwde/2016/02/1456444800_v1-0-0-cg6-bg5-18-T4/bytes_rcvd.gpf similarity index 100% rename from pkg/goDB/engine/testdb/tun_3g_c1_fwde/2016/02/1456444800/bytes_rcvd.gpf rename to pkg/goDB/engine/testdb/tun_3g_c1_fwde/2016/02/1456444800_v1-0-0-cg6-bg5-18-T4/bytes_rcvd.gpf diff --git a/pkg/goDB/engine/testdb/tun_3g_c1_fwde/2016/02/1456444800/bytes_sent.gpf b/pkg/goDB/engine/testdb/tun_3g_c1_fwde/2016/02/1456444800_v1-0-0-cg6-bg5-18-T4/bytes_sent.gpf similarity index 100% rename from pkg/goDB/engine/testdb/tun_3g_c1_fwde/2016/02/1456444800/bytes_sent.gpf rename to pkg/goDB/engine/testdb/tun_3g_c1_fwde/2016/02/1456444800_v1-0-0-cg6-bg5-18-T4/bytes_sent.gpf diff --git a/pkg/goDB/engine/testdb/tun_3g_c1_fwde/2016/02/1456444800/dip.gpf b/pkg/goDB/engine/testdb/tun_3g_c1_fwde/2016/02/1456444800_v1-0-0-cg6-bg5-18-T4/dip.gpf similarity index 100% rename from pkg/goDB/engine/testdb/tun_3g_c1_fwde/2016/02/1456444800/dip.gpf rename to pkg/goDB/engine/testdb/tun_3g_c1_fwde/2016/02/1456444800_v1-0-0-cg6-bg5-18-T4/dip.gpf diff --git a/pkg/goDB/engine/testdb/tun_3g_c1_fwde/2016/02/1456444800/dport.gpf b/pkg/goDB/engine/testdb/tun_3g_c1_fwde/2016/02/1456444800_v1-0-0-cg6-bg5-18-T4/dport.gpf similarity index 100% rename from pkg/goDB/engine/testdb/tun_3g_c1_fwde/2016/02/1456444800/dport.gpf rename to pkg/goDB/engine/testdb/tun_3g_c1_fwde/2016/02/1456444800_v1-0-0-cg6-bg5-18-T4/dport.gpf diff --git a/pkg/goDB/engine/testdb/tun_3g_c1_fwde/2016/02/1456444800/pkts_rcvd.gpf b/pkg/goDB/engine/testdb/tun_3g_c1_fwde/2016/02/1456444800_v1-0-0-cg6-bg5-18-T4/pkts_rcvd.gpf similarity index 100% rename from pkg/goDB/engine/testdb/tun_3g_c1_fwde/2016/02/1456444800/pkts_rcvd.gpf rename to pkg/goDB/engine/testdb/tun_3g_c1_fwde/2016/02/1456444800_v1-0-0-cg6-bg5-18-T4/pkts_rcvd.gpf diff --git 
a/pkg/goDB/engine/testdb/tun_3g_c1_fwde/2016/02/1456444800/pkts_sent.gpf b/pkg/goDB/engine/testdb/tun_3g_c1_fwde/2016/02/1456444800_v1-0-0-cg6-bg5-18-T4/pkts_sent.gpf similarity index 100% rename from pkg/goDB/engine/testdb/tun_3g_c1_fwde/2016/02/1456444800/pkts_sent.gpf rename to pkg/goDB/engine/testdb/tun_3g_c1_fwde/2016/02/1456444800_v1-0-0-cg6-bg5-18-T4/pkts_sent.gpf diff --git a/pkg/goDB/engine/testdb/tun_3g_c1_fwde/2016/02/1456444800/proto.gpf b/pkg/goDB/engine/testdb/tun_3g_c1_fwde/2016/02/1456444800_v1-0-0-cg6-bg5-18-T4/proto.gpf similarity index 100% rename from pkg/goDB/engine/testdb/tun_3g_c1_fwde/2016/02/1456444800/proto.gpf rename to pkg/goDB/engine/testdb/tun_3g_c1_fwde/2016/02/1456444800_v1-0-0-cg6-bg5-18-T4/proto.gpf diff --git a/pkg/goDB/engine/testdb/tun_3g_c1_fwde/2016/02/1456444800/sip.gpf b/pkg/goDB/engine/testdb/tun_3g_c1_fwde/2016/02/1456444800_v1-0-0-cg6-bg5-18-T4/sip.gpf similarity index 100% rename from pkg/goDB/engine/testdb/tun_3g_c1_fwde/2016/02/1456444800/sip.gpf rename to pkg/goDB/engine/testdb/tun_3g_c1_fwde/2016/02/1456444800_v1-0-0-cg6-bg5-18-T4/sip.gpf diff --git a/pkg/goDB/godb_test.go b/pkg/goDB/godb_test.go index c772b619..f3c8ce7a 100644 --- a/pkg/goDB/godb_test.go +++ b/pkg/goDB/godb_test.go @@ -135,7 +135,7 @@ func populateTestDir(t *testing.T, basePath, iface string, timestamp time.Time) testPath := filepath.Join(basePath, iface) - f := gpfile.NewDir(testPath, timestamp.Unix(), gpfile.ModeWrite) + f := gpfile.NewDirWriter(testPath, timestamp.Unix()) require.Nil(t, f.Open()) data, update := dbData(generateFlows()) diff --git a/pkg/goDB/storage/gpfile/gpdir.go b/pkg/goDB/storage/gpfile/gpdir.go index 820be2ee..e27aac7b 100644 --- a/pkg/goDB/storage/gpfile/gpdir.go +++ b/pkg/goDB/storage/gpfile/gpdir.go @@ -22,8 +22,7 @@ const ( // EpochDay is one day in seconds EpochDay int64 = 86400 - metadataFileName = ".blockmeta" - maxUint32 = 1<<32 - 1 // 4294967295 + maxUint32 = 1<<32 - 1 // 4294967295 ) var ( @@ -40,106 +39,83 @@ var ( // ErrDirNotOpen denotes that a GPDir is not (yet) open or has been closed ErrDirNotOpen = errors.New("GPDir not open, call Open() first") -) -// TrafficMetadata denotes a serializable set of metadata information about traffic stats -type TrafficMetadata struct { - NumV4Entries uint64 `json:"num_v4_entries"` - NumV6Entries uint64 `json:"num_v6_entries"` - NumDrops uint64 `json:"num_drops"` -} + // ErrInvalidDirName denotes that the provided name for the GPDir is invalid + ErrInvalidDirName = errors.New("invalid GPDir path / name") +) -// Stats denotes statistics for a GPDir instance -type Stats struct { - Counts types.Counters `json:"counts"` - Traffic TrafficMetadata `json:"traffic"` -} +// GPDir denotes a timestamped goDB directory (usually a daily set of blocks) +type GPDir struct { + gpFiles [types.ColIdxCount]*GPFile // Set of GPFile (lazy-load) -// NumFlows returns the total number of flows -func (t TrafficMetadata) NumFlows() uint64 { - return t.NumV4Entries + t.NumV6Entries -} + options []Option // Options (forwarded to all GPFiles) + basePath string // goDB base path (up to interface) + dirTimestampPath string // GPDir path (up to GPDir timestanp) + dirPath string // GPDir path (full path including GPDir timestanp and potential metadata suffix) + metaPath string // Full path to GPDir metadata + accessMode int // Access mode (also forwarded to all GPFiles) + permissions os.FileMode // Permissions (also forwarded to all GPFiles) -// Add computes the sum of two sets of TrafficMetadata -func (t TrafficMetadata) Add(t2 
TrafficMetadata) TrafficMetadata { - t.NumDrops += t2.NumDrops - t.NumV4Entries += t2.NumV4Entries - t.NumV6Entries += t2.NumV6Entries - return t + isOpen bool + *Metadata } -// Sub computes the difference of two sets of TrafficMetadata -func (t TrafficMetadata) Sub(t2 TrafficMetadata) TrafficMetadata { - t.NumDrops -= t2.NumDrops - t.NumV4Entries -= t2.NumV4Entries - t.NumV6Entries -= t2.NumV6Entries - return t -} +// ExtractTimestampMetadataSuffix is a convenience function that performs timestamp (and potentially +// metadata suffix) extraction from the GPDir path / directory name +func ExtractTimestampMetadataSuffix(filename string) (timestamp int64, metadataSuffix string, err error) { -// Add computes the sum of all counters and traffic metadata for the stats -func (s Stats) Add(s2 Stats) Stats { - s.Counts.Add(s2.Counts) - s.Traffic = s.Traffic.Add(s2.Traffic) - return s -} + // Split by delimiter and perform minimum validation + splitName := strings.Split(filename, "_") + if len(splitName) == 0 { + err = ErrInvalidDirName + return + } -// Sub computes the sum of all counters and traffic metadata for the stats -func (s Stats) Sub(s2 Stats) Stats { - s.Counts.Sub(s2.Counts) - s.Traffic = s.Traffic.Sub(s2.Traffic) - return s -} + // Check if a suffix is present and extract it + if len(splitName) > 1 { + metadataSuffix = splitName[1] + } -// Metadata denotes a serializable set of metadata (both globally and per-block) -type Metadata struct { - BlockMetadata [types.ColIdxCount]*storage.BlockHeader - BlockTraffic []TrafficMetadata + // Parse timestamp from prefix + timestamp, err = strconv.ParseInt(splitName[0], 10, 64) - Stats - Version uint64 + return } -// newMetadata initializes a new Metadata set (internal / serialization use only) -func newMetadata() *Metadata { - m := Metadata{ - BlockTraffic: make([]TrafficMetadata, 0), - Version: headerVersion, - } - for i := 0; i < int(types.ColIdxCount); i++ { - m.BlockMetadata[i] = &storage.BlockHeader{ - CurrentOffset: 0, - BlockList: make([]storage.BlockAtTime, 0), - } +// NewDirWriter instantiates a new directory for writing +func NewDirWriter(basePath string, timestamp int64, options ...Option) *GPDir { + obj := GPDir{ + basePath: strings.TrimSuffix(basePath, "/"), + accessMode: ModeWrite, + permissions: defaultPermissions, + options: options, } - return &m -} - -// GPDir denotes a timestamped goDB directory (usually a daily set of blocks) -type GPDir struct { - gpFiles [types.ColIdxCount]*GPFile // Set of GPFile (lazy-load) - options []Option // Options (forwarded to all GPFiles) - basePath string // goDB base path (up to interface) - dirPath string // GPDir path (up to GPDir timestanp) - metaPath string // Full path to GPDir metadata - accessMode int // Access mode (also forwarded to all GPFiles) - permissions os.FileMode // Permissions (also forwarded to all GPFiles) + obj.dirTimestampPath, obj.dirPath = genWritePathForTimestamp(basePath, timestamp) + obj.metaPath = filepath.Join(obj.dirPath, metadataFileName) - isOpen bool - *Metadata + return &obj } -// NewDir instantiates a new directory (doesn't yet do anything) -func NewDir(basePath string, timestamp int64, accessMode int, options ...Option) *GPDir { +// NewDirReader instantiates a new directory for reading (doesn't yet do anything except for potentially +// reading / decoding a subset of the metadata from a provided string suffix) +func NewDirReader(basePath string, timestamp int64, metadataSuffix string, options ...Option) *GPDir { obj := GPDir{ - basePath: filepath.Clean(strings.TrimSuffix(basePath,
"/")), - accessMode: accessMode, + basePath: strings.TrimSuffix(basePath, "/"), + accessMode: ModeRead, permissions: defaultPermissions, options: options, } - obj.dirPath = GenPathForTimestamp(basePath, timestamp) + obj.dirPath = genReadPathForTimestamp(basePath, timestamp, metadataSuffix) obj.metaPath = filepath.Join(obj.dirPath, metadataFileName) + + // If metadata was provided via a suffix, attempt to read / decode it and fall + // back to doing nothing in case it fails + if metadataSuffix != "" { + obj.setMetadataFromSuffix(metadataSuffix) + } + return &obj } @@ -192,6 +168,11 @@ func (d *GPDir) Open(options ...Option) error { return nil } +// IsOpen returns whether the GPDir instance is currently open +func (d *GPDir) IsOpen() bool { + return d.isOpen +} + // NumIPv4EntriesAtIndex returns the number of IPv4 entries for a given block index func (d *GPDir) NumIPv4EntriesAtIndex(blockIdx int) uint64 { return d.BlockTraffic[blockIdx].NumV4Entries @@ -254,6 +235,11 @@ func (d *GPDir) TimeRange() (first int64, last int64) { d.BlockMetadata[0].Blocks()[d.BlockMetadata[0].NBlocks()-1].Timestamp } +const ( + minMetadataFileSize = 73 + minMetadataFileSizePos = minMetadataFileSize - 1 +) + // Unmarshal reads and unmarshals a serialized metadata set into the GPDir instance func (d *GPDir) Unmarshal(r concurrency.ReadWriteSeekCloser) error { @@ -262,16 +248,12 @@ func (d *GPDir) Unmarshal(r concurrency.ReadWriteSeekCloser) error { if err != nil { return err } - defer func() { - if cerr := memFile.Close(); cerr != nil && err == nil { - err = cerr - } - }() data := memFile.Data() - if len(data) < 16 { + if len(data) < minMetadataFileSize { return fmt.Errorf("%w (len: %d)", ErrInputSizeTooSmall, len(data)) } + _ = data[minMetadataFileSizePos] // Compiler hint d.Metadata = newMetadata() @@ -284,7 +266,7 @@ func (d *GPDir) Unmarshal(r concurrency.ReadWriteSeekCloser) error { d.Metadata.Counts.BytesSent = binary.BigEndian.Uint64(data[48:56]) // Get global Counters (BytesSent) d.Metadata.Counts.PacketsRcvd = binary.BigEndian.Uint64(data[56:64]) // Get global Counters (PacketsRcvd) d.Metadata.Counts.PacketsSent = binary.BigEndian.Uint64(data[64:72]) // Get global Counters (PacketsSent) - pos := 72 + pos := minMetadataFileSizePos // Get block information for i := 0; i < int(types.ColIdxCount); i++ { @@ -319,7 +301,7 @@ func (d *GPDir) Unmarshal(r concurrency.ReadWriteSeekCloser) error { pos += 16 } - return nil + return memFile.Close() } // Marshal marshals and writes the metadata of the GPDir instance into serialized metadata set @@ -360,7 +342,7 @@ func (d *GPDir) Marshal(w concurrency.ReadWriteSeekCloser) error { binary.BigEndian.PutUint64(data[48:56], d.Metadata.Counts.BytesSent) // Store global Counters (BytesSent) binary.BigEndian.PutUint64(data[56:64], d.Metadata.Counts.PacketsRcvd) // Store global Counters (PacketsRcvd) binary.BigEndian.PutUint64(data[64:72], d.Metadata.Counts.PacketsSent) // Store global Counters (PacketsSent) - pos := 72 + pos := minMetadataFileSizePos if nBlocks > 0 { @@ -417,7 +399,7 @@ func (d *GPDir) Marshal(w concurrency.ReadWriteSeekCloser) error { return nil } -// Path returns the path of the GPDir (up to the timestamp) +// Path returns the path of the GPDir (up to the timestamp and including a potential metadata suffix) func (d *GPDir) Path() string { return d.dirPath } @@ -482,7 +464,7 @@ func (d *GPDir) Column(colIdx types.ColumnIndex) (*GPFile, error) { if d.gpFiles[colIdx] == nil { var err error - if d.gpFiles[colIdx], err = New(filepath.Join(d.Path(),
types.ColumnFileNames[colIdx]+FileSuffix), d.BlockMetadata[colIdx], d.accessMode, d.options...); err != nil { + if d.gpFiles[colIdx], err = New(filepath.Join(d.dirPath, types.ColumnFileNames[colIdx]+FileSuffix), d.BlockMetadata[colIdx], d.accessMode, d.options...); err != nil { return nil, err } } @@ -522,20 +504,26 @@ func (d *GPDir) writeMetadataAtomic() error { } // Move the temporary file - return os.Rename(tempFile.Name(), d.MetadataPath()) + if err = os.Rename(tempFile.Name(), d.metaPath); err != nil { + return err + } + + // Move / rename the output directory (if suffix has changed) + if curDirPath, newDirPath := d.dirPath, d.dirTimestampPath+d.Metadata.MarshalString(); curDirPath != newDirPath { + return os.Rename(curDirPath, newDirPath) + } + return nil } func (d *GPDir) setPermissions(permissions fs.FileMode) { d.permissions = permissions } -// GenPathForTimestamp provides a unified generator method that allows to construct the path to -// the data on disk based on a base path and a timestamp -func GenPathForTimestamp(basePath string, timestamp int64) string { - dayTimestamp := DirTimestamp(timestamp) - dayUnix := time.Unix(dayTimestamp, 0) - - return filepath.Join(basePath, strconv.Itoa(dayUnix.Year()), fmt.Sprintf("%02d", dayUnix.Month()), strconv.FormatInt(dayTimestamp, 10)) +func (d *GPDir) setMetadataFromSuffix(metadataSuffix string) { + meta := new(Metadata) // no need to use newMetadata() since no block information is used + if err := meta.UnmarshalString(metadataSuffix); err == nil { + d.Metadata = meta + } } // DirTimestamp returns timestamp rounded down to the nearest directory time frame (usually a day) @@ -543,6 +531,70 @@ func DirTimestamp(timestamp int64) int64 { return (timestamp / EpochDay) * EpochDay } +// FindDirForTimestamp finds the full path to a GPDir given a timestamp (if the metadata suffix is +// not known). 
+// Note: This is inherently slow due to the search, so it should not be used to determine directories +// used for queries (currently only used in legacy DB conversion tool) +// TODO: Remove once legacy tool has been purged +func FindDirForTimestamp(basePath string, timestamp int64) (match string, found bool) { + dayTimestamp := DirTimestamp(timestamp) + dayUnix := time.Unix(dayTimestamp, 0) + + monthDir := filepath.Join(basePath, strconv.Itoa(dayUnix.Year()), padNumber(int64(dayUnix.Month()))) + dirents, err := os.ReadDir(monthDir) + if err != nil { + return + } + + // Find a matching directory using prefix-based binary search + if match, found = binarySearchPrefix(dirents, strconv.FormatInt(dayTimestamp, 10)); found { + match = filepath.Join(monthDir, match) + } + + return +} + +func genWritePathForTimestamp(basePath string, timestamp int64) (string, string) { + dayTimestamp := DirTimestamp(timestamp) + dayUnix := time.Unix(dayTimestamp, 0) + + searchPath := filepath.Join(basePath, strconv.Itoa(dayUnix.Year()), padNumber(int64(dayUnix.Month()))) + prefix := strconv.FormatInt(dayTimestamp, 10) + + initialDirPath := filepath.Join(searchPath, prefix) + dirents, err := os.ReadDir(searchPath) + if err != nil { + return initialDirPath, initialDirPath + } + + // Find a matching directory using prefix-based binary search + if match, found := binarySearchPrefix(dirents, prefix); found { + return initialDirPath, filepath.Join(searchPath, match) + } + + return initialDirPath, initialDirPath +} + +// genReadPathForTimestamp provides a unified generator method that allows to construct the path to +// the data on disk based on a base path, a timestamp and a metadata suffix +func genReadPathForTimestamp(basePath string, timestamp int64, metadataSuffix string) string { + dayTimestamp := DirTimestamp(timestamp) + dayUnix := time.Unix(dayTimestamp, 0) + + if metadataSuffix == "" { + return filepath.Join(basePath, strconv.Itoa(dayUnix.Year()), padNumber(int64(dayUnix.Month())), strconv.FormatInt(dayTimestamp, 10)) + } + + return filepath.Join(basePath, strconv.Itoa(dayUnix.Year()), padNumber(int64(dayUnix.Month())), strconv.FormatInt(dayTimestamp, 10)+"_"+metadataSuffix) +} + +func padNumber(n int64) string { + if n < 10 { + return "0" + strconv.FormatInt(n, 10) + } + return strconv.FormatInt(n, 10) +} + func calculateDirPerm(filePerm os.FileMode) os.FileMode { if filePerm&0400 != 0 { @@ -557,3 +609,29 @@ func calculateDirPerm(filePerm os.FileMode) os.FileMode { return filePerm } + +// This method provides a simple & fast custom prefix-based binary search for a []fs.DirEntry slice +// in order to avoid having to jump through hoops and take the performance penalty of using the +// standard sort facilities for this custom type +func binarySearchPrefix(slice []fs.DirEntry, prefix string) (match string, found bool) { + + if prefix == "" { + return + } + + low := 0 + high := len(slice) - 1 + for low <= high { + mid := (low + high) / 2 + elem := slice[mid].Name() + if strings.HasPrefix(elem, prefix) { + return elem, true + } else if elem < prefix { + low = mid + 1 + } else { + high = mid - 1 + } + } + + return +} diff --git a/pkg/goDB/storage/gpfile/gpfile_test.go b/pkg/goDB/storage/gpfile/gpfile_test.go index 1d3e3e7f..2d87918f 100644 --- a/pkg/goDB/storage/gpfile/gpfile_test.go +++ b/pkg/goDB/storage/gpfile/gpfile_test.go @@ -7,6 +7,7 @@ import ( "io/fs" "os" "path/filepath" + "slices" "testing" "time" @@ -23,6 +24,7 @@ const ( ) var ( + testDirPath = filepath.Join(testBasePath, "test_db") testFilePath = 
filepath.Join(testBasePath, "test.gpf")
 	testEncoders = []encoders.Type{
@@ -58,7 +60,7 @@ func TestDirPermissions(t *testing.T) {
 		0644,
 	} {
 		t.Run(perm.String(), func(t *testing.T) {
-			require.Nil(t, os.RemoveAll("/tmp/test_db"))
+			require.Nil(t, os.RemoveAll(testDirPath))

 			// default (no option provided) should amount to default permissions
 			opts := []Option{WithPermissions(perm)}
@@ -67,15 +69,15 @@ func TestDirPermissions(t *testing.T) {
 				perm = defaultPermissions
 			}

-			testDir := NewDir("/tmp/test_db", 1000, ModeWrite, opts...)
+			testDir := NewDirWriter(testDirPath, 1000, opts...)
 			require.Nil(t, testDir.Open(), "error opening test dir for writing")
 			require.Nil(t, testDir.Close(), "error closing test dir")

-			stat, err := os.Stat("/tmp/test_db")
+			stat, err := os.Stat(testDirPath)
 			require.Nil(t, err, "failed to call Stat() on new GPDir")
 			require.Equal(t, stat.Mode().Perm(), calculateDirPerm(perm), stat.Mode().String())

-			stat, err = os.Stat(filepath.Join("/tmp/test_db/1970/01/0", metadataFileName))
+			stat, err = os.Stat(filepath.Join(testDirPath, "1970/01/0_0-0-0-0-0-0-0", metadataFileName))
 			require.Nil(t, err, "failed to call Stat() on block metadata file")
 			require.Equal(t, stat.Mode().Perm(), perm, stat.Mode().String())
 		})
@@ -219,23 +221,23 @@ func testRoundtrip(t *testing.T, encType encoders.Type) {

 func TestInvalidMetadata(t *testing.T) {
-	require.Nil(t, os.RemoveAll("/tmp/test_db"))
-	require.Nil(t, os.MkdirAll("/tmp/test_db/1970/01/0", 0750), "error creating test dir for reading")
-	require.Nil(t, os.WriteFile("/tmp/test_db/1970/01/0/.blockmeta", []byte{0x1}, 0600), "error creating test metdadata for reading")
+	require.Nil(t, os.RemoveAll(testDirPath))
+	require.Nil(t, os.MkdirAll(filepath.Join(testDirPath, "1970/01/0"), 0750), "error creating test dir for reading")
+	require.Nil(t, os.WriteFile(filepath.Join(testDirPath, "1970/01/0/.blockmeta"), []byte{0x1}, 0600), "error creating test metadata for reading")

-	testDir := NewDir("/tmp/test_db", 1000, ModeRead)
+	testDir := NewDirReader(testDirPath, 1000, "")
 	require.ErrorIs(t, testDir.Open(), ErrInputSizeTooSmall)
 }

 func TestEmptyMetadata(t *testing.T) {
-	require.Nil(t, os.RemoveAll("/tmp/test_db"))
+	require.Nil(t, os.RemoveAll(testDirPath))

-	testDir := NewDir("/tmp/test_db", 1000, ModeWrite)
+	testDir := NewDirWriter(testDirPath, 1000)
 	require.Nil(t, testDir.Open(), "error opening test dir for writing")
 	require.Nil(t, testDir.Close(), "error writing test dir")

-	testDir = NewDir("/tmp/test_db", 1000, ModeRead)
+	testDir = NewDirReader(testDirPath, 1000, "0-0-0-0-0-0-0")
 	require.Nil(t, testDir.Open(), "error opening test dir for reading")

 	for i := 0; i < int(types.ColIdxCount); i++ {
@@ -247,9 +249,9 @@ func TestEmptyMetadata(t *testing.T) {

 func TestMetadataRoundTrip(t *testing.T) {
-	require.Nil(t, os.RemoveAll("/tmp/test_db"))
+	require.Nil(t, os.RemoveAll(testDirPath))

-	testDir := NewDir("/tmp/test_db", 1000, ModeWrite)
+	testDir := NewDirWriter(testDirPath, 1000)
 	require.Nil(t, testDir.Open(), "error opening test dir for writing")

 	for i := 0; i < int(types.ColIdxCount); i++ {
@@ -298,7 +300,11 @@ func TestMetadataRoundTrip(t *testing.T) {
 	require.Nil(t, jsoniter.NewDecoder(buf).Decode(&refMetadata), "error decoding reference data for later comparison")
 	require.Nil(t, testDir.Close(), "error writing test dir")

-	testDir = NewDir("/tmp/test_db", 1000, ModeRead)
+	_, fullPath := genWritePathForTimestamp(testDirPath, 1000)
+	ts, suffix, err := ExtractTimestampMetadataSuffix(filepath.Base(fullPath))
+	require.Nil(t, err)
+
+	testDir = NewDirReader(testDirPath, ts, suffix)
 	require.Nil(t, testDir.Open(), "error opening test dir for reading")

 	require.Equal(t, testDir.Metadata.BlockTraffic, refMetadata.BlockTraffic, "mismatched global block metadata")
@@ -321,10 +327,10 @@ func TestMetadataRoundTrip(t *testing.T) {

 func TestBrokenAccess(t *testing.T) {
-	require.Nil(t, os.RemoveAll("/tmp/test_db"))
+	require.Nil(t, os.RemoveAll(testDirPath))

 	// Write some blocks and flush the data to disk
-	testDir := NewDir("/tmp/test_db", 1000, ModeWrite)
+	testDir := NewDirWriter(testDirPath, 1000)
 	require.Nil(t, testDir.Open(), "error opening test dir for writing")
 	require.Nil(t, writeDummyBlock(1, testDir, 1), "failed to write blocks")
 	require.Nil(t, writeDummyBlock(2, testDir, 2), "failed to write blocks")
@@ -332,7 +338,7 @@ func TestBrokenAccess(t *testing.T) {
 	require.Nil(t, testDir.Close(), "error writing test dir")

 	// Append another block, flush the GPFiles but "fail" to write the metadata
-	testDir = NewDir("/tmp/test_db", 1000, ModeWrite)
+	testDir = NewDirWriter(testDirPath, 1000)
 	require.Nil(t, testDir.Open(), "error opening test dir for writing")
 	require.Nil(t, writeDummyBlock(3, testDir, 3), "failed to write blocks")
 	require.NotEqual(t, expectedOffset, testDir.BlockMetadata[0].CurrentOffset)
@@ -342,8 +348,12 @@ func TestBrokenAccess(t *testing.T) {
 		}
 	}

+	_, fullPath := genWritePathForTimestamp(testDirPath, 1000)
+	ts, suffix, err := ExtractTimestampMetadataSuffix(filepath.Base(fullPath))
+	require.Nil(t, err)
+
 	// Read the directory and validate that we only "see" two blocks on metadata level
-	testDir = NewDir("/tmp/test_db", 1000, ModeRead)
+	testDir = NewDirReader(testDirPath, ts, suffix)
 	require.Nil(t, testDir.Open(), "error opening test dir for reading")
 	require.Equal(t, expectedOffset, testDir.BlockMetadata[0].CurrentOffset)
 	require.Equal(t, testDir.BlockMetadata[0].NBlocks(), 2)
@@ -363,7 +373,7 @@ func TestBrokenAccess(t *testing.T) {
 	})

 	// Append another two blocks and write normally
-	testDir = NewDir("/tmp/test_db", 1000, ModeWrite)
+	testDir = NewDirWriter(testDirPath, 1000)
 	require.Nil(t, testDir.Open(), "error opening test dir for writing")
 	require.Equal(t, expectedOffset, testDir.BlockMetadata[0].CurrentOffset)
 	require.Nil(t, writeDummyBlock(4, testDir, 4), "failed to write blocks")
@@ -372,8 +382,12 @@ func TestBrokenAccess(t *testing.T) {
 	require.Equal(t, testDir.BlockMetadata[0].NBlocks(), 4)
 	require.Nil(t, testDir.Close(), "error writing test dir")

+	_, fullPath = genWritePathForTimestamp(testDirPath, 1000)
+	ts, suffix, err = ExtractTimestampMetadataSuffix(filepath.Base(fullPath))
+	require.Nil(t, err)
+
 	// Read the directory and validate that we only "see" four blocks on metadata level
-	testDir = NewDir("/tmp/test_db", 1000, ModeRead)
+	testDir = NewDirReader(testDirPath, ts, suffix)
 	require.Nil(t, testDir.Open(), "error opening test dir for reading")
 	require.Equal(t, expectedOffset, testDir.BlockMetadata[0].CurrentOffset)
 	require.Equal(t, testDir.BlockMetadata[0].NBlocks(), 4)
@@ -403,13 +417,86 @@ func TestDailyDirectoryPathLayers(t *testing.T) {
 	for year := 1970; year < 2200; year++ {
 		for month := time.January; month <= time.December; month++ {
 			for day := 1; day <= time.Date(year, month+1, 0, 0, 0, 0, 0, time.UTC).Day(); day++ {
-				gpDir := NewDir("/tmp/test_db", time.Date(year, month, day, 0, 0, 0, 0, time.UTC).Unix(), ModeRead)
-				require.Equal(t, gpDir.dirPath, filepath.Join("/tmp/test_db", fmt.Sprintf("%d", year), fmt.Sprintf("%02d", month), fmt.Sprintf("%d", DirTimestamp(time.Date(year, month, day, 0, 0, 0, 0, time.UTC).Unix()))))
+				gpDir := NewDirReader(testDirPath, time.Date(year, month, day, 0, 0, 0, 0, time.UTC).Unix(), "")
+				require.Equal(t, gpDir.dirPath, filepath.Join(testDirPath, fmt.Sprintf("%d", year), fmt.Sprintf("%02d", month), fmt.Sprintf("%d", DirTimestamp(time.Date(year, month, day, 0, 0, 0, 0, time.UTC).Unix()))))
 			}
 		}
 	}
 }
+
+type testDirEntry string
+
+func (t testDirEntry) Name() string {
+	return string(t)
+}
+
+func (t testDirEntry) IsDir() bool {
+	return true
+}
+
+func (t testDirEntry) Type() fs.FileMode {
+	panic("not implemented") // TODO: Implement
+}
+
+func (t testDirEntry) Info() (fs.FileInfo, error) {
+	panic("not implemented") // TODO: Implement
+}
+
+func TestBinarySearchPrefix(t *testing.T) {
+
+	slice := []fs.DirEntry{
+		testDirEntry("apple"),
+		testDirEntry("banana"),
+		testDirEntry("cherry"),
+		testDirEntry("date"),
+		testDirEntry("grape"),
+		testDirEntry("kiwi"),
+		testDirEntry("mango"),
+		testDirEntry("orange"),
+		testDirEntry("pear"),
+		testDirEntry("pineapple"),
+		testDirEntry("strawberry"),
+		testDirEntry("blueberry"),
+		testDirEntry("watermelon"),
+		testDirEntry("raspberry"),
+		testDirEntry("blackberry"),
+		testDirEntry("pomegranate"),
+		testDirEntry("fig"),
+		testDirEntry("plum"),
+		testDirEntry("apricot")}
+
+	slices.SortFunc(slice, func(a, b fs.DirEntry) int {
+		if a.Name() == b.Name() {
+			return 0
+		}
+		if a.Name() < b.Name() {
+			return -1
+		}
+		return 1
+	})
+
+	for _, input := range slice {
+
+		// Validate that a full match returns the correct result
+		result, found := binarySearchPrefix(slice, input.Name())
+		require.True(t, found, input)
+		require.Equal(t, input.Name(), result)
+
+		// Validate that a prefix match returns the correct result
+		result, found = binarySearchPrefix(slice, input.Name()[:3])
+		require.True(t, found, input)
+		require.Equal(t, input.Name(), result)
+	}
+
+	result, found := binarySearchPrefix(slice, "idonotexist")
+	require.False(t, found)
+	require.Empty(t, result)
+
+	result, found = binarySearchPrefix(slice, "")
+	require.False(t, found)
+	require.Empty(t, result)
+}
+
 func (g *GPFile) validateBlocks(nExpected int) error {
 	blocks, err := g.Blocks()
 	if err != nil {
diff --git a/pkg/goDB/storage/gpfile/metadata.go b/pkg/goDB/storage/gpfile/metadata.go
new file mode 100644
index 00000000..d6084df1
--- /dev/null
+++ b/pkg/goDB/storage/gpfile/metadata.go
@@ -0,0 +1,156 @@
+package gpfile
+
+import (
+	"errors"
+	"strings"
+	"unsafe"
+
+	"github.com/els0r/goProbe/pkg/goDB/storage"
+	"github.com/els0r/goProbe/pkg/types"
+	"github.com/fako1024/gotools/bitpack"
+)
+
+const (
+	metadataFileName = ".blockmeta"
+)
+
+// TrafficMetadata denotes a serializable set of metadata information about traffic stats
+type TrafficMetadata struct {
+	NumV4Entries uint64 `json:"num_v4_entries"`
+	NumV6Entries uint64 `json:"num_v6_entries"`
+	NumDrops     uint64 `json:"num_drops"`
+}
+
+// Stats denotes statistics for a GPDir instance
+type Stats struct {
+	Counts  types.Counters  `json:"counts"`
+	Traffic TrafficMetadata `json:"traffic"`
+}
+
+// NumFlows returns the total number of flows
+func (t TrafficMetadata) NumFlows() uint64 {
+	return t.NumV4Entries + t.NumV6Entries
+}
+
+// Add computes the sum of two sets of TrafficMetadata
+func (t TrafficMetadata) Add(t2 TrafficMetadata) TrafficMetadata {
+	t.NumDrops += t2.NumDrops
+	t.NumV4Entries += t2.NumV4Entries
+	t.NumV6Entries += t2.NumV6Entries
+	return t
+}
+
+// Sub computes the difference of two sets of TrafficMetadata
+func (t TrafficMetadata) Sub(t2 TrafficMetadata) TrafficMetadata {
+	t.NumDrops -= t2.NumDrops
+	t.NumV4Entries -= t2.NumV4Entries
+	t.NumV6Entries -= t2.NumV6Entries
+	return t
+}
+
+// Add computes the sum of all counters and traffic metadata for the stats
+func (s Stats) Add(s2 Stats) Stats {
+	s.Counts.Add(s2.Counts)
+	s.Traffic = s.Traffic.Add(s2.Traffic)
+	return s
+}
+
+// Sub computes the difference of all counters and traffic metadata for the stats
+func (s Stats) Sub(s2 Stats) Stats {
+	s.Counts.Sub(s2.Counts)
+	s.Traffic = s.Traffic.Sub(s2.Traffic)
+	return s
+}
+
+// Metadata denotes a serializable set of metadata (both globally and per-block)
+type Metadata struct {
+	BlockMetadata [types.ColIdxCount]*storage.BlockHeader
+	BlockTraffic  []TrafficMetadata
+
+	Stats
+	Version uint64
+}
+
+// newMetadata initializes a new Metadata set (internal / serialization use only)
+func newMetadata() *Metadata {
+	m := Metadata{
+		BlockTraffic: make([]TrafficMetadata, 0),
+		Version:      headerVersion,
+	}
+	for i := 0; i < int(types.ColIdxCount); i++ {
+		m.BlockMetadata[i] = &storage.BlockHeader{
+			CurrentOffset: 0,
+			BlockList:     make([]storage.BlockAtTime, 0),
+		}
+	}
+	return &m
+}
+
+const (
+	maxDirnameLength = 96 // accounts for a 12-digit epoch timestamp and 7 worst-case compressed uint64 values & delimiters
+
+	delimUnderscore = 95 // "_"
+	delimDash       = 45 // "-"
+)
+
+// MarshalString marshals (partial) metadata information into a (compressed) string
+// representation
+func (m *Metadata) MarshalString() string {
+
+	buf := make([]byte, maxDirnameLength)
+	buf[0] = delimUnderscore
+
+	pos := 1
+
+	n := bitpack.EncodeUint64ToByteBuf(m.Traffic.NumV4Entries, buf[pos:])
+	pos += n + 1
+	buf[pos-1] = delimDash
+
+	n = bitpack.EncodeUint64ToByteBuf(m.Traffic.NumV6Entries, buf[pos:])
+	pos += n + 1
+	buf[pos-1] = delimDash
+
+	n = bitpack.EncodeUint64ToByteBuf(m.Traffic.NumDrops, buf[pos:])
+	pos += n + 1
+	buf[pos-1] = delimDash
+
+	n = bitpack.EncodeUint64ToByteBuf(m.Counts.BytesRcvd, buf[pos:])
+	pos += n + 1
+	buf[pos-1] = delimDash
+
+	n = bitpack.EncodeUint64ToByteBuf(m.Counts.BytesSent, buf[pos:])
+	pos += n + 1
+	buf[pos-1] = delimDash
+
+	n = bitpack.EncodeUint64ToByteBuf(m.Counts.PacketsRcvd, buf[pos:])
+	pos += n + 1
+	buf[pos-1] = delimDash
+
+	n = bitpack.EncodeUint64ToByteBuf(m.Counts.PacketsSent, buf[pos:])
+	pos += n
+
+	// Subslice to string length and cast to string (zero-allocation)
+	buf = buf[0:pos]
+	return *(*string)(unsafe.Pointer(&buf)) // #nosec G103
+}
+
+// UnmarshalString deserializes a string representation of (partial) metadata
+// into an existing metadata structure
+func (m *Metadata) UnmarshalString(input string) error {
+
+	fields := strings.Split(input, "-")
+	if len(fields) != 7 {
+		return errors.New("invalid number of string fields")
+	}
+
+	m.Traffic.NumV4Entries = bitpack.DecodeUint64FromString(fields[0])
+	m.Traffic.NumV6Entries = bitpack.DecodeUint64FromString(fields[1])
+	m.Traffic.NumDrops = bitpack.DecodeUint64FromString(fields[2])
+
+	m.Counts.BytesRcvd = bitpack.DecodeUint64FromString(fields[3])
+	m.Counts.BytesSent = bitpack.DecodeUint64FromString(fields[4])
+	m.Counts.PacketsRcvd = bitpack.DecodeUint64FromString(fields[5])
+	m.Counts.PacketsSent = bitpack.DecodeUint64FromString(fields[6])
+
+	return nil
+}
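
For reference, a minimal sketch (not part of the patch) of the round-trip through the MarshalString/UnmarshalString API added in metadata.go. The standalone main package is illustrative only, and the expectation that zero-valued metadata yields the "_0-0-0-0-0-0-0" suffix is inferred from the directory names asserted in the tests above; only the part after the leading underscore is decoded, mirroring the bare "0-0-0-0-0-0-0" handed to NewDirReader in TestEmptyMetadata.

package main

import (
	"fmt"

	"github.com/els0r/goProbe/pkg/goDB/storage/gpfile"
)

func main() {
	// Zero-valued metadata set; per the test expectations above this marshals to the
	// "_0-0-0-0-0-0-0" suffix seen in directory names such as "1970/01/0_0-0-0-0-0-0-0"
	var meta gpfile.Metadata

	// MarshalString yields "_" followed by seven dash-delimited, bit-packed counters
	// (v4 entries, v6 entries, drops, bytes/packets received and sent)
	suffix := meta.MarshalString()
	fmt.Println(suffix)

	// The reader side decodes the suffix without its leading underscore, i.e. the same
	// form that is passed to NewDirReader in the tests
	var decoded gpfile.Metadata
	if err := decoded.UnmarshalString(suffix[1:]); err != nil {
		panic(err)
	}
	fmt.Println(decoded.Traffic.NumFlows(), decoded.Counts.BytesRcvd)
}

Encoding the traffic totals into the directory name makes them recoverable from a plain directory listing, which is presumably why the read path now takes a timestamp plus metadata suffix (NewDirReader) instead of the former access-mode argument.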