From 431d18dfb53cdd7d53befeb15dc30dcc5d01a531 Mon Sep 17 00:00:00 2001
From: Andrei Matei
Date: Mon, 16 Dec 2019 19:09:17 -0500
Subject: [PATCH 1/4] backupccl: change the error code for "file already
 exists" errors

The backup code used to use a class 58 error code ("system error") for
situations where a backup target already exists - DuplicateFile. Class 58
is the wrong class, particularly since we've started using class 58 errors
to represent problems with the state of the cluster (range unavailable,
dropped connections), so clients are encouraged to treat 58 errors as
retriable (and, for example, the scaledata tests do). A "file already
exists" error is not transient, so this patch switches to a new code in
"Class 42 - Syntax or Access Rule Violation". It's hard to imagine that
Postgres returns a class 58 code for anything related to user input.

Release note (sql change): The error code for backups that would overwrite
files has changed from class 58 ("system") to class 42 ("Syntax or Access
Rule Violation").
---
 pkg/ccl/backupccl/backup.go    | 6 +++---
 pkg/sql/pgwire/pgcode/codes.go | 1 +
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/pkg/ccl/backupccl/backup.go b/pkg/ccl/backupccl/backup.go
index 68a9b1c6d3c0..1da108098e50 100644
--- a/pkg/ccl/backupccl/backup.go
+++ b/pkg/ccl/backupccl/backup.go
@@ -938,7 +938,7 @@ func VerifyUsableExportTarget(
 		// TODO(dt): If we audit exactly what not-exists error each ExternalStorage
 		// returns (and then wrap/tag them), we could narrow this check.
 		r.Close()
-		return pgerror.Newf(pgcode.DuplicateFile,
+		return pgerror.Newf(pgcode.FileAlreadyExists,
 			"%s already contains a %s file",
 			readable, BackupDescriptorName)
 	}
@@ -946,13 +946,13 @@ func VerifyUsableExportTarget(
 		// TODO(dt): If we audit exactly what not-exists error each ExternalStorage
 		// returns (and then wrap/tag them), we could narrow this check.
 		r.Close()
-		return pgerror.Newf(pgcode.DuplicateFile,
+		return pgerror.Newf(pgcode.FileAlreadyExists,
 			"%s already contains a %s file",
 			readable, BackupManifestName)
 	}
 	if r, err := exportStore.ReadFile(ctx, BackupDescriptorCheckpointName); err == nil {
 		r.Close()
-		return pgerror.Newf(pgcode.DuplicateFile,
+		return pgerror.Newf(pgcode.FileAlreadyExists,
 			"%s already contains a %s file (is another operation already in progress?)",
 			readable, BackupDescriptorCheckpointName)
 	}
diff --git a/pkg/sql/pgwire/pgcode/codes.go b/pkg/sql/pgwire/pgcode/codes.go
index 16e3c43115d4..1d4cfc1fbdd0 100644
--- a/pkg/sql/pgwire/pgcode/codes.go
+++ b/pkg/sql/pgwire/pgcode/codes.go
@@ -222,6 +222,7 @@ const (
 	InvalidSchemaDefinition = "42P15"
 	InvalidTableDefinition = "42P16"
 	InvalidObjectDefinition = "42P17"
+	FileAlreadyExists = "42C01"
 	// Class 44 - WITH CHECK OPTION Violation
 	WithCheckOptionViolation = "44000"
 	// Class 53 - Insufficient Resources

From 710fa23e31c44091efb62488beaebb4ba9a69e18 Mon Sep 17 00:00:00 2001
From: Georgia Hong
Date: Mon, 16 Dec 2019 17:08:17 -0500
Subject: [PATCH 2/4] blobs: Reduce file size of benchmark tests

Release note: None
---
 pkg/blobs/bench_test.go | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/pkg/blobs/bench_test.go b/pkg/blobs/bench_test.go
index c1c79e93382e..755a9e73bdd3 100644
--- a/pkg/blobs/bench_test.go
+++ b/pkg/blobs/bench_test.go
@@ -24,6 +24,10 @@ import (
 	"github.com/cockroachdb/cockroach/pkg/util/hlc"
 )
 
+// filesize should be at least 1 GB when running these benchmarks.
+// Reduced to 129 KiB for CI.
+const filesize = 129 * 1 << 10
+
 type benchmarkTestCase struct {
 	localNodeID       roachpb.NodeID
 	remoteNodeID      roachpb.NodeID
@@ -68,7 +72,7 @@ func BenchmarkStreamingReadFile(b *testing.B) {
 		localExternalDir:  localExternalDir,
 		remoteExternalDir: remoteExternalDir,
 		blobClient:        blobClient,
-		fileSize:          1 << 30, // 1 GB
+		fileSize:          filesize,
 		fileName:          "test/largefile.csv",
 	}
 	benchmarkStreamingReadFile(b, params)
@@ -119,7 +123,7 @@ func BenchmarkStreamingWriteFile(b *testing.B) {
 		localExternalDir:  localExternalDir,
 		remoteExternalDir: remoteExternalDir,
 		blobClient:        blobClient,
-		fileSize:          1 << 30, // 1 GB
+		fileSize:          filesize,
 		fileName:          "test/largefile.csv",
 	}
 	benchmarkStreamingWriteFile(b, params)

From e5ed1befdb0bff9013741d1bc463a4786532fcb3 Mon Sep 17 00:00:00 2001
From: Peter Mattis
Date: Mon, 16 Dec 2019 07:35:31 -0500
Subject: [PATCH 3/4] storage/engine: small logging fixes

Change `Pebble.GetCompactionStats` to be prefixed with a newline to match
the formatting of RocksDB. This ensures that the compaction stats display
will not contain the log prefix, which was misaligning the table header.

Add a missing sort to `Pebble.GetSSTables`. The missing sort was causing
the sstable summary log message to be much busier than for RocksDB,
because `SSTableInfos.String` expects the infos to be sorted.

Move the formatting of `estimated_pending_compaction_bytes: x` into
`RocksDB.GetCompactionStats`. The Pebble compaction stats already include
this, and it is useful to see the estimated pending compaction bytes
whenever the compaction stats are output.

Release note: None
---
 pkg/storage/engine/pebble.go  | 8 +++++++-
 pkg/storage/engine/rocksdb.go | 9 ++++++++-
 pkg/storage/store.go          | 4 +---
 3 files changed, 16 insertions(+), 5 deletions(-)

diff --git a/pkg/storage/engine/pebble.go b/pkg/storage/engine/pebble.go
index d4bced4c94b7..c6e073eb5146 100644
--- a/pkg/storage/engine/pebble.go
+++ b/pkg/storage/engine/pebble.go
@@ -17,6 +17,7 @@ import (
 	"io"
 	"io/ioutil"
 	"os"
+	"sort"
 
 	"github.com/cockroachdb/cockroach/pkg/base"
 	"github.com/cockroachdb/cockroach/pkg/roachpb"
@@ -487,7 +488,10 @@ func (p *Pebble) Get(key MVCCKey) ([]byte, error) {
 
 // GetCompactionStats implements the Engine interface.
 func (p *Pebble) GetCompactionStats() string {
-	return p.db.Metrics().String()
+	// NB: The initial blank line matches the formatting used by RocksDB and
+	// ensures that compaction stats display will not contain the log prefix
+	// (this method is only used for logging purposes).
+	return "\n" + p.db.Metrics().String()
 }
 
 // GetTickersAndHistograms implements the Engine interface.
@@ -879,6 +883,8 @@ func (p *Pebble) GetSSTables() (sstables SSTableInfos) {
 			sstables = append(sstables, info)
 		}
 	}
+
+	sort.Sort(sstables)
 	return sstables
 }
 
diff --git a/pkg/storage/engine/rocksdb.go b/pkg/storage/engine/rocksdb.go
index fe3fceae3037..1360e7f70cc2 100644
--- a/pkg/storage/engine/rocksdb.go
+++ b/pkg/storage/engine/rocksdb.go
@@ -33,6 +33,7 @@ import (
 	"github.com/cockroachdb/cockroach/pkg/util"
 	"github.com/cockroachdb/cockroach/pkg/util/envutil"
 	"github.com/cockroachdb/cockroach/pkg/util/hlc"
+	"github.com/cockroachdb/cockroach/pkg/util/humanizeutil"
 	"github.com/cockroachdb/cockroach/pkg/util/log"
 	"github.com/cockroachdb/cockroach/pkg/util/protoutil"
 	"github.com/cockroachdb/cockroach/pkg/util/syncutil"
@@ -1261,7 +1262,13 @@ func (r *RocksDB) GetTickersAndHistograms() (*enginepb.TickersAndHistograms, err
 // GetCompactionStats returns the internal RocksDB compaction stats. See
 // https://github.com/facebook/rocksdb/wiki/RocksDB-Tuning-Guide#rocksdb-statistics.
 func (r *RocksDB) GetCompactionStats() string {
-	return cStringToGoString(C.DBGetCompactionStats(r.rdb))
+	s := cStringToGoString(C.DBGetCompactionStats(r.rdb)) +
+		"estimated_pending_compaction_bytes: "
+	stats, err := r.GetStats()
+	if err != nil {
+		return s + err.Error()
+	}
+	return s + humanizeutil.IBytes(stats.PendingCompactionBytesEstimate)
 }
 
 // GetEnvStats returns stats for the RocksDB env. This may include encryption stats.
diff --git a/pkg/storage/store.go b/pkg/storage/store.go
index 1cd6704c47b7..5399fba51785 100644
--- a/pkg/storage/store.go
+++ b/pkg/storage/store.go
@@ -53,7 +53,6 @@ import (
 	"github.com/cockroachdb/cockroach/pkg/util/contextutil"
 	"github.com/cockroachdb/cockroach/pkg/util/envutil"
 	"github.com/cockroachdb/cockroach/pkg/util/hlc"
-	"github.com/cockroachdb/cockroach/pkg/util/humanizeutil"
 	"github.com/cockroachdb/cockroach/pkg/util/limit"
 	"github.com/cockroachdb/cockroach/pkg/util/log"
 	"github.com/cockroachdb/cockroach/pkg/util/metric"
@@ -2354,8 +2353,7 @@ func (s *Store) ComputeMetrics(ctx context.Context, tick int) error {
 	// stats.
 	if tick%logSSTInfoTicks == 1 /* every 10m */ {
 		log.Infof(ctx, "sstables (read amplification = %d):\n%s", readAmp, sstables)
-		log.Infof(ctx, "%sestimated_pending_compaction_bytes: %s",
-			s.engine.GetCompactionStats(), humanizeutil.IBytes(stats.PendingCompactionBytesEstimate))
+		log.Infof(ctx, "%s", s.engine.GetCompactionStats())
 	}
 	return nil
 }

From ebd4b844827d2cb71b95121a025d514477a1a10f Mon Sep 17 00:00:00 2001
From: Oliver Tan
Date: Tue, 3 Dec 2019 16:24:00 -0800
Subject: [PATCH 4/4] sql: acceptance test and test cleanup for TimeTZ

This PR completes the TimeTZ saga!

* Added Java unit tests
* Removed some tests from the test whitelist
* Added the postgres regress suite

Also fix the parse error to use pgerror.Newf instead of pgerror.Wrapf, as
wrapping the pgdate error makes the message more confusing (it exposes the
dummy date used internally during parsing).

Release note (sql change): This PR (along with a string of past PRs)
allows TimeTZ to be used throughout CockroachDB.
--- .../testdata/java/src/main/java/MainTest.java | 9 ++ pkg/sql/logictest/testdata/logic_test/timetz | 113 ++++++++++++++++++ pkg/util/timetz/timetz.go | 3 +- 3 files changed, 123 insertions(+), 2 deletions(-) diff --git a/pkg/acceptance/testdata/java/src/main/java/MainTest.java b/pkg/acceptance/testdata/java/src/main/java/MainTest.java index f1ac9750fd63..b68c787630f4 100644 --- a/pkg/acceptance/testdata/java/src/main/java/MainTest.java +++ b/pkg/acceptance/testdata/java/src/main/java/MainTest.java @@ -177,6 +177,15 @@ public void testTime() throws Exception { Assert.assertEquals("01:02:03.456", actual); } + @Test + public void testTimeTZ() throws Exception { + PreparedStatement stmt = conn.prepareStatement("SELECT '01:02:03.456-07:00'::TIMETZ"); + ResultSet rs = stmt.executeQuery(); + rs.next(); + String actual = new SimpleDateFormat("HH:mm:ss.SSSZ").format(rs.getTime(1)); + Assert.assertEquals("08:02:03.456+0000", actual); + } + @Test public void testUUID() throws Exception { UUID uuid = UUID.randomUUID(); diff --git a/pkg/sql/logictest/testdata/logic_test/timetz b/pkg/sql/logictest/testdata/logic_test/timetz index efc36d14521b..7323890ff7ef 100644 --- a/pkg/sql/logictest/testdata/logic_test/timetz +++ b/pkg/sql/logictest/testdata/logic_test/timetz @@ -526,3 +526,116 @@ query R SELECT extract(epoch from timetz '12:00:00+04') ---- 28800 + +# Adapted from `src/test/regress/expected/timetz.out` in postgres +subtest regress_postgres + +statement ok +CREATE TABLE TIMETZ_TBL (id serial primary key, f1 time(2) with time zone) + +# Changed PDT/PST/EDT -> zone offsets, as pgdate does not support abbreviations. +statement ok +INSERT INTO TIMETZ_TBL (f1) VALUES ('00:01-07') + +statement ok +INSERT INTO TIMETZ_TBL (f1) VALUES ('01:00-07') + +statement ok +INSERT INTO TIMETZ_TBL (f1) VALUES ('02:03-07') + +statement ok +INSERT INTO TIMETZ_TBL (f1) VALUES ('07:07-05') + +statement ok +INSERT INTO TIMETZ_TBL (f1) VALUES ('08:08-04') + +statement ok +INSERT INTO TIMETZ_TBL (f1) VALUES ('11:59-07') + +statement ok +INSERT INTO TIMETZ_TBL (f1) VALUES ('12:00-07') + +statement ok +INSERT INTO TIMETZ_TBL (f1) VALUES ('12:01-07') + +statement ok +INSERT INTO TIMETZ_TBL (f1) VALUES ('23:59-07') + +statement ok +INSERT INTO TIMETZ_TBL (f1) VALUES ('11:59:59.99 PM-07') + +statement ok +INSERT INTO TIMETZ_TBL (f1) VALUES ('2003-03-07 15:36:39 America/New_York') + +statement ok +INSERT INTO TIMETZ_TBL (f1) VALUES ('2003-07-07 15:36:39 America/New_York') + +# pgdate supports this, but postgres does not. 
+# INSERT INTO TIMETZ_TBL (f1) VALUES ('15:36:39 America/New_York') + +# this should fail (timezone not specified without a date) +query error could not parse "1970-01-01 15:36:39 m2" as TimeTZ +INSERT INTO TIMETZ_TBL (f1) VALUES ('15:36:39 m2') + +# this should fail (dynamic timezone abbreviation without a date) +query error could not parse "1970-01-01 15:36:39 MSK m2" as TimeTZ +INSERT INTO TIMETZ_TBL (f1) VALUES ('15:36:39 MSK m2') + +query T +SELECT f1::string AS "Time TZ" FROM TIMETZ_TBL ORDER BY id +---- +00:01:00-07:00:00 +01:00:00-07:00:00 +02:03:00-07:00:00 +07:07:00-05:00:00 +08:08:00-04:00:00 +11:59:00-07:00:00 +12:00:00-07:00:00 +12:01:00-07:00:00 +23:59:00-07:00:00 +23:59:59.99-07:00:00 +15:36:39-05:00:00 +15:36:39-04:00:00 + +query T +SELECT f1::string AS "Three" FROM TIMETZ_TBL WHERE f1 < '05:06:07-07' ORDER BY id +---- +00:01:00-07:00:00 +01:00:00-07:00:00 +02:03:00-07:00:00 + +query T +SELECT f1::string AS "Seven" FROM TIMETZ_TBL WHERE f1 > '05:06:07-07' ORDER BY id +---- +07:07:00-05:00:00 +08:08:00-04:00:00 +11:59:00-07:00:00 +12:00:00-07:00:00 +12:01:00-07:00:00 +23:59:00-07:00:00 +23:59:59.99-07:00:00 +15:36:39-05:00:00 +15:36:39-04:00:00 + +query T +SELECT f1::string AS "None" FROM TIMETZ_TBL WHERE f1 < '00:00-07' ORDER BY id +---- + +query T +SELECT f1::string AS "Ten" FROM TIMETZ_TBL WHERE f1 >= '00:00-07' ORDER BY id +---- +00:01:00-07:00:00 +01:00:00-07:00:00 +02:03:00-07:00:00 +07:07:00-05:00:00 +08:08:00-04:00:00 +11:59:00-07:00:00 +12:00:00-07:00:00 +12:01:00-07:00:00 +23:59:00-07:00:00 +23:59:59.99-07:00:00 +15:36:39-05:00:00 +15:36:39-04:00:00 + +query error pq: unsupported binary operator: \+ +SELECT f1 + time with time zone '00:01' AS "Illegal" FROM TIMETZ_TBL ORDER BY id diff --git a/pkg/util/timetz/timetz.go b/pkg/util/timetz/timetz.go index 955e9aebf704..ccb1422a2edc 100644 --- a/pkg/util/timetz/timetz.go +++ b/pkg/util/timetz/timetz.go @@ -104,8 +104,7 @@ func ParseTimeTZ(now time.Time, s string, precision time.Duration) (TimeTZ, erro t, err := pgdate.ParseTimestamp(now, pgdate.ParseModeYMD, s) if err != nil { // Build our own error message to avoid exposing the dummy date. - return TimeTZ{}, pgerror.Wrapf( - err, + return TimeTZ{}, pgerror.Newf( pgcode.InvalidTextRepresentation, "could not parse %q as TimeTZ", s,
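
A note for consumers of the error-code change in PATCH 1/4: clients that
classify errors by SQLSTATE class can now distinguish transient
cluster-state errors (class 58) from the permanent "file already exists"
condition (42C01). The sketch below is illustrative only - it assumes a Go
client built on github.com/lib/pq, and the isRetriable helper is
hypothetical, not part of CockroachDB:

	package client

	import (
		"errors"

		"github.com/lib/pq"
	)

	// isRetriable reports whether a failed BACKUP is worth retrying.
	// Class 58 ("system error") codes now describe cluster state (range
	// unavailable, dropped connections) and may resolve on retry; the
	// new 42C01 ("file already exists") code is permanent until the user
	// picks a different backup target.
	func isRetriable(err error) bool {
		var pqErr *pq.Error
		if !errors.As(err, &pqErr) {
			return false
		}
		return pqErr.Code.Class() == "58"
	}

Under the old DuplicateFile code (also class 58), a predicate like this
would have treated a doomed BACKUP as retriable and looped on it forever,
which is the misclassification the first patch removes.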