mutation_builder_unique.go
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package optbuilder

import (
"github.com/cockroachdb/cockroach/pkg/server/telemetry"
"github.com/cockroachdb/cockroach/pkg/sql/opt"
"github.com/cockroachdb/cockroach/pkg/sql/opt/cat"
"github.com/cockroachdb/cockroach/pkg/sql/opt/memo"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util"
)

// buildUniqueChecksForInsert builds uniqueness check queries for an insert.
// These check queries are used to enforce UNIQUE WITHOUT INDEX constraints.
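//
// For illustration, with a hypothetical table such as
//
//   CREATE TABLE t (k INT PRIMARY KEY, v INT, UNIQUE WITHOUT INDEX (v));
//
// an INSERT into t cannot rely on an index to enforce the uniqueness of v, so
// a check query is planned alongside the mutation to verify that the inserted
// values of v do not already exist in the table.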
func (mb *mutationBuilder) buildUniqueChecksForInsert() {
// We only need to build unique checks if there is at least one unique
// constraint without an index.
if !mb.hasUniqueWithoutIndexConstraints() {
return
}
mb.ensureWithID()
h := &mb.uniqueCheckHelper
for i, n := 0, mb.tab.UniqueCount(); i < n; i++ {
// If this constraint is already enforced by an index, we don't need to plan
// a check.
if mb.tab.Unique(i).WithoutIndex() && h.init(mb, i) {
mb.uniqueChecks = append(mb.uniqueChecks, h.buildInsertionCheck())
}
}
telemetry.Inc(sqltelemetry.UniqueChecksUseCounter)
}

// buildUniqueChecksForUpdate builds uniqueness check queries for an update.
// These check queries are used to enforce UNIQUE WITHOUT INDEX constraints.
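//
// Unlike the insert case, a check is only planned for constraints whose
// columns are actually being updated (see uniqueColsUpdated). For example,
// given a hypothetical UNIQUE WITHOUT INDEX constraint on column v, an UPDATE
// that only sets an unrelated column needs no uniqueness check, while an
// UPDATE that sets v does.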
func (mb *mutationBuilder) buildUniqueChecksForUpdate() {
// We only need to build unique checks if there is at least one unique
// constraint without an index.
if !mb.hasUniqueWithoutIndexConstraints() {
return
}
mb.ensureWithID()
h := &mb.uniqueCheckHelper
for i, n := 0, mb.tab.UniqueCount(); i < n; i++ {
// If this constraint is already enforced by an index or doesn't include
// the updated columns, we don't need to plan a check.
if mb.tab.Unique(i).WithoutIndex() && mb.uniqueColsUpdated(i) && h.init(mb, i) {
// The insertion check works for updates too since it simply checks that
// the unique columns in the newly inserted or updated rows do not match
// any existing rows. The check prevents rows from matching themselves by
// adding a filter based on the primary key.
mb.uniqueChecks = append(mb.uniqueChecks, h.buildInsertionCheck())
}
}
telemetry.Inc(sqltelemetry.UniqueChecksUseCounter)
}

// buildUniqueChecksForUpsert builds uniqueness check queries for an upsert.
// These check queries are used to enforce UNIQUE WITHOUT INDEX constraints.
func (mb *mutationBuilder) buildUniqueChecksForUpsert() {
// We only need to build unique checks if there is at least one unique
// constraint without an index.
if !mb.hasUniqueWithoutIndexConstraints() {
return
}
mb.ensureWithID()
h := &mb.uniqueCheckHelper
for i, n := 0, mb.tab.UniqueCount(); i < n; i++ {
// If this constraint is already enforced by an index, we don't need to plan
// a check.
if mb.tab.Unique(i).WithoutIndex() && h.init(mb, i) {
// The insertion check works for upserts too since it simply checks that
// the unique columns in the newly inserted or updated rows do not match
// any existing rows. The check prevents rows from matching themselves by
// adding a filter based on the primary key.
mb.uniqueChecks = append(mb.uniqueChecks, h.buildInsertionCheck())
}
}
telemetry.Inc(sqltelemetry.UniqueChecksUseCounter)
}

// hasUniqueWithoutIndexConstraints returns true if there are any
// UNIQUE WITHOUT INDEX constraints on the table.
func (mb *mutationBuilder) hasUniqueWithoutIndexConstraints() bool {
for i, n := 0, mb.tab.UniqueCount(); i < n; i++ {
if mb.tab.Unique(i).WithoutIndex() {
return true
}
}
return false
}

// uniqueColsUpdated returns true if any of the columns for a unique
// constraint are being updated (according to updateColIDs).
func (mb *mutationBuilder) uniqueColsUpdated(uniqueOrdinal int) bool {
uc := mb.tab.Unique(uniqueOrdinal)
for i, n := 0, uc.ColumnCount(); i < n; i++ {
if ord := uc.ColumnOrdinal(mb.tab, i); mb.updateColIDs[ord] != 0 {
return true
}
}
return false
}

// uniqueCheckHelper is a type associated with a single unique constraint and
// is used to build the "leaves" of a unique check expression, namely the
// WithScan of the mutation input and the Scan of the table.
type uniqueCheckHelper struct {
mb *mutationBuilder
unique cat.UniqueConstraint
uniqueOrdinal int
// uniqueOrdinals are the table ordinals of the unique columns in the table
// that is being mutated. They correspond 1-to-1 to the columns in the
// UniqueConstraint.
uniqueOrdinals util.FastIntSet
// primaryKeyOrdinals includes the ordinals from any primary key columns
// that are not included in uniqueOrdinals.
primaryKeyOrdinals util.FastIntSet
}

// init initializes the helper with a unique constraint.
//
// Returns false if the constraint should be ignored (e.g. because the new
// values for the unique columns are known to be always NULL).
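//
// For example, if this mutation is the result of an ON DELETE SET NULL
// cascade that nulls out one of the unique columns, every new value of that
// column is NULL. NULL values never conflict under SQL uniqueness semantics,
// so the check can be skipped entirely.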
func (h *uniqueCheckHelper) init(mb *mutationBuilder, uniqueOrdinal int) bool {
// This initialization pattern ensures that fields are not unwittingly
// reused. Field reuse must be explicit.
*h = uniqueCheckHelper{
mb: mb,
unique: mb.tab.Unique(uniqueOrdinal),
uniqueOrdinal: uniqueOrdinal,
}
var uniqueOrds util.FastIntSet
for i, n := 0, h.unique.ColumnCount(); i < n; i++ {
uniqueOrds.Add(h.unique.ColumnOrdinal(mb.tab, i))
}
// Find the primary key columns that are not part of the unique constraint.
// If there aren't any, we don't need a check.
// TODO(mgartner): We also don't need a check if there exists a unique index
// with columns that are a subset of the unique constraint columns.
// Similarly, we don't need a check for a partial unique constraint if there
// exists a non-partial unique constraint with columns that are a subset of
// the partial unique constraint columns.
primaryOrds := getIndexLaxKeyOrdinals(mb.tab.Index(cat.PrimaryIndex))
primaryOrds.DifferenceWith(uniqueOrds)
if primaryOrds.Empty() {
// The primary key columns are a subset of the unique columns; unique check
// not needed.
return false
}
h.uniqueOrdinals = uniqueOrds
h.primaryKeyOrdinals = primaryOrds
// Check if we are setting NULL values for the unique columns, like when this
// mutation is the result of a SET NULL cascade action.
numNullCols := 0
for tabOrd, ok := h.uniqueOrdinals.Next(0); ok; tabOrd, ok = h.uniqueOrdinals.Next(tabOrd + 1) {
colID := mb.mapToReturnColID(tabOrd)
if memo.OutputColumnIsAlwaysNull(mb.outScope.expr, colID) {
numNullCols++
}
}
// If at least one unique column is getting a NULL value, unique check not
// needed.
return numNullCols == 0
}

// buildInsertionCheck creates a unique check for rows which are added to a
// table. The input to the insertion check will be produced from the input to
// the mutation operator.
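//
// As a rough sketch (column names are illustrative), for a constraint
// UNIQUE WITHOUT INDEX (v) on a table t with primary key k, the check has the
// shape of a semi-join between the mutation input and a scan of the table:
//
//   semi-join
//    ├── with-scan (new values: k_new, v_new)
//    ├── scan t (existing values: k, v)
//    └── filters
//         ├── v_new = v
//         └── k_new != k
//
// Any row produced by the semi-join indicates a uniqueness violation, which
// surfaces as a duplicate key error when the checks are executed.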
func (h *uniqueCheckHelper) buildInsertionCheck() memo.UniqueChecksItem {
f := h.mb.b.factory
// Build a self semi-join, with the new values on the left and the
// existing values on the right.
scanScope, ordinals := h.buildTableScan()
withScanScope, _ := h.mb.buildCheckInputScan(
checkInputScanNewVals, ordinals,
)
// Build the join filters:
// (new_a = existing_a) AND (new_b = existing_b) AND ...
//
// Set the capacity to h.uniqueOrdinals.Len()+1 since we'll have an equality
// condition for each column in the unique constraint, plus one additional
// condition to prevent rows from matching themselves (see below). If the
// constraint is partial, add 2 to account for filtering both the WithScan
// and the Scan by the partial unique constraint predicate.
numFilters := h.uniqueOrdinals.Len() + 1
_, isPartial := h.unique.Predicate()
if isPartial {
numFilters += 2
}
semiJoinFilters := make(memo.FiltersExpr, 0, numFilters)
for i, ok := h.uniqueOrdinals.Next(0); ok; i, ok = h.uniqueOrdinals.Next(i + 1) {
semiJoinFilters = append(semiJoinFilters, f.ConstructFiltersItem(
f.ConstructEq(
f.ConstructVariable(withScanScope.cols[i].id),
f.ConstructVariable(scanScope.cols[i].id),
),
))
}
// If the unique constraint is partial, we need to filter out inserted rows
// that don't satisfy the predicate. We also need to make sure that rows do
// not match existing rows in the table that do not satisfy the
// predicate. So we add the predicate as a filter on both the WithScan
// columns and the Scan columns.
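// For example, for a hypothetical partial constraint
//   UNIQUE WITHOUT INDEX (v) WHERE region = 'east'
// the predicate region = 'east' is applied to both sides of the semi-join, so
// only rows that participate in the constraint are compared.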
if isPartial {
pred := h.mb.parseUniqueConstraintPredicateExpr(h.uniqueOrdinal)
typedPred := withScanScope.resolveAndRequireType(pred, types.Bool)
withScanPred := h.mb.b.buildScalar(typedPred, withScanScope, nil, nil, nil)
semiJoinFilters = append(semiJoinFilters, f.ConstructFiltersItem(withScanPred))
typedPred = scanScope.resolveAndRequireType(pred, types.Bool)
scanPred := h.mb.b.buildScalar(typedPred, scanScope, nil, nil, nil)
semiJoinFilters = append(semiJoinFilters, f.ConstructFiltersItem(scanPred))
}
// We need to prevent rows from matching themselves in the semi join. We can
// do this by adding another filter that uses the primary key columns to check
// that the two rows are not the same row:
// (new_pk1 != existing_pk1) OR (new_pk2 != existing_pk2) OR ...
var pkFilter opt.ScalarExpr
for i, ok := h.primaryKeyOrdinals.Next(0); ok; i, ok = h.primaryKeyOrdinals.Next(i + 1) {
pkFilterLocal := f.ConstructNe(
f.ConstructVariable(withScanScope.cols[i].id),
f.ConstructVariable(scanScope.cols[i].id),
)
if pkFilter == nil {
pkFilter = pkFilterLocal
} else {
pkFilter = f.ConstructOr(pkFilter, pkFilterLocal)
}
}
semiJoinFilters = append(semiJoinFilters, f.ConstructFiltersItem(pkFilter))
semiJoin := f.ConstructSemiJoin(withScanScope.expr, scanScope.expr, semiJoinFilters, memo.EmptyJoinPrivate)
// Collect the key columns that will be shown in the error message if there
// is a duplicate key violation resulting from this uniqueness check.
keyCols := make(opt.ColList, 0, h.uniqueOrdinals.Len())
for i, ok := h.uniqueOrdinals.Next(0); ok; i, ok = h.uniqueOrdinals.Next(i + 1) {
keyCols = append(keyCols, withScanScope.cols[i].id)
}
return f.ConstructUniqueChecksItem(semiJoin, &memo.UniqueChecksItemPrivate{
Table: h.mb.tabID,
CheckOrdinal: h.uniqueOrdinal,
KeyCols: keyCols,
OpName: h.mb.opName,
})
}

// buildTableScan builds a Scan of the table. The ordinals of the columns
// scanned are also returned.
func (h *uniqueCheckHelper) buildTableScan() (outScope *scope, ordinals []int) {
tabMeta := h.mb.b.addTable(h.mb.tab, tree.NewUnqualifiedTableName(h.mb.tab.Name()))
// TODO(mgartner): Include mutation columns once schema changes are
// transactional.
ordinals = tableOrdinals(tabMeta.Table, columnKinds{
includeMutations: false,
includeSystem: false,
includeVirtualInverted: false,
includeVirtualComputed: true,
})
return h.mb.b.buildScan(
tabMeta,
ordinals,
nil, /* indexFlags */
noRowLocking,
h.mb.b.allocScope(),
), ordinals
}