From 33fffa33ce75f06e47336742040cd3bbf8ba0249 Mon Sep 17 00:00:00 2001 From: Marcel Lauhoff Date: Fri, 11 Feb 2022 16:30:23 +0100 Subject: [PATCH 01/35] util: Make encryption passphrase size a parameter fscrypt support requires keys longer than 20 bytes. As a preparation, make the new passphrase length configurable, but default to 20 bytes. Signed-off-by: Marcel Lauhoff --- internal/rbd/encryption.go | 4 +++- internal/util/crypto.go | 10 +++++----- internal/util/crypto_test.go | 6 +++--- 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/internal/rbd/encryption.go b/internal/rbd/encryption.go index 0b632aa9955..4afd8b4f766 100644 --- a/internal/rbd/encryption.go +++ b/internal/rbd/encryption.go @@ -61,6 +61,8 @@ const ( // DEK is stored. metadataDEK = "rbd.csi.ceph.com/dek" oldMetadataDEK = ".rbd.csi.ceph.com/dek" + + encryptionPassphraseSize = 20 ) // checkRbdImageEncrypted verifies if rbd image was encrypted when created. @@ -100,7 +102,7 @@ func (ri *rbdImage) isEncrypted() bool { // - the Data-Encryption-Key (DEK) will be generated stored for use by the KMS; // - the RBD image will be marked to support encryption in its metadata. func (ri *rbdImage) setupEncryption(ctx context.Context) error { - err := ri.encryption.StoreNewCryptoPassphrase(ri.VolID) + err := ri.encryption.StoreNewCryptoPassphrase(ri.VolID, encryptionPassphraseSize) if err != nil { log.ErrorLog(ctx, "failed to save encryption passphrase for "+ "image %s: %s", ri, err) diff --git a/internal/util/crypto.go b/internal/util/crypto.go index bec6b18a83a..c7aaf06059a 100644 --- a/internal/util/crypto.go +++ b/internal/util/crypto.go @@ -36,7 +36,7 @@ const ( // Passphrase size - 20 bytes is 160 bits to satisfy: // https://tools.ietf.org/html/rfc6749#section-10.10 - encryptionPassphraseSize = 20 + defaultEncryptionPassphraseSize = 20 ) var ( @@ -156,8 +156,8 @@ func (ve *VolumeEncryption) StoreCryptoPassphrase(volumeID, passphrase string) e } // StoreNewCryptoPassphrase generates a new passphrase and saves it in the KMS. -func (ve *VolumeEncryption) StoreNewCryptoPassphrase(volumeID string) error { - passphrase, err := generateNewEncryptionPassphrase() +func (ve *VolumeEncryption) StoreNewCryptoPassphrase(volumeID string, length int) error { + passphrase, err := generateNewEncryptionPassphrase(length) if err != nil { return fmt.Errorf("failed to generate passphrase for %s: %w", volumeID, err) } @@ -176,8 +176,8 @@ func (ve *VolumeEncryption) GetCryptoPassphrase(volumeID string) (string, error) } // generateNewEncryptionPassphrase generates a random passphrase for encryption. 
-func generateNewEncryptionPassphrase() (string, error) { - bytesPassphrase := make([]byte, encryptionPassphraseSize) +func generateNewEncryptionPassphrase(length int) (string, error) { + bytesPassphrase := make([]byte, length) _, err := rand.Read(bytesPassphrase) if err != nil { return "", err diff --git a/internal/util/crypto_test.go b/internal/util/crypto_test.go index 28b8fefea77..a5bb49da617 100644 --- a/internal/util/crypto_test.go +++ b/internal/util/crypto_test.go @@ -28,14 +28,14 @@ import ( func TestGenerateNewEncryptionPassphrase(t *testing.T) { t.Parallel() - b64Passphrase, err := generateNewEncryptionPassphrase() + b64Passphrase, err := generateNewEncryptionPassphrase(defaultEncryptionPassphraseSize) require.NoError(t, err) // b64Passphrase is URL-encoded, decode to verify the length of the // passphrase passphrase, err := base64.URLEncoding.DecodeString(b64Passphrase) assert.NoError(t, err) - assert.Equal(t, encryptionPassphraseSize, len(passphrase)) + assert.Equal(t, defaultEncryptionPassphraseSize, len(passphrase)) } func TestKMSWorkflow(t *testing.T) { @@ -56,7 +56,7 @@ func TestKMSWorkflow(t *testing.T) { volumeID := "volume-id" - err = ve.StoreNewCryptoPassphrase(volumeID) + err = ve.StoreNewCryptoPassphrase(volumeID, defaultEncryptionPassphraseSize) assert.NoError(t, err) passphrase, err := ve.GetCryptoPassphrase(volumeID) From 20081caa6ef282a7ea51df28bfc0b858db47972e Mon Sep 17 00:00:00 2001 From: Marcel Lauhoff Date: Wed, 13 Jul 2022 17:36:59 +0200 Subject: [PATCH 02/35] util: Add util to fetch encryption type from vol options Fetch encryption type from vol options. Make fallback type configurable to support RBD (default block) and Ceph FS (default file) Signed-off-by: Marcel Lauhoff --- internal/util/crypto.go | 44 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/internal/util/crypto.go b/internal/util/crypto.go index c7aaf06059a..2489ab01470 100644 --- a/internal/util/crypto.go +++ b/internal/util/crypto.go @@ -80,6 +80,50 @@ func FetchEncryptionKMSID(encrypted, kmsID string) (string, error) { return kmsID, nil } +type EncryptionType int + +const ( + EncryptionTypeInvalid EncryptionType = iota + EncryptionTypeBlock = iota + EncryptionTypeFile = iota +) + +func ParseEncryptionType(typeStr string) EncryptionType { + switch typeStr { + case "block": + return EncryptionTypeBlock + case "file": + return EncryptionTypeFile + default: + return EncryptionTypeInvalid + } +} + +func EncryptionTypeString(encType EncryptionType) string { + switch encType { + case EncryptionTypeBlock: + return "block" + case EncryptionTypeFile: + return "file" + case EncryptionTypeInvalid: + return "" + default: + return "" + } +} + +// FetchEncryptionType returns encryptionType specified in volOptions. +// If not specified, use fallback. If specified but invalid, return +// invalid. +func FetchEncryptionType(volOptions map[string]string, fallback EncryptionType) EncryptionType { + encType, ok := volOptions["encryptionType"] + if !ok { + return fallback + } + + return ParseEncryptionType(encType) +} + // NewVolumeEncryption creates a new instance of VolumeEncryption and // configures the DEKStore. If the KMS does not provide a DEKStore interface, // the VolumeEncryption will be created *and* a ErrDEKStoreNeeded is returned. 
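For illustration only (not part of the patch series), a minimal sketch of how the helpers added in the patch above could be called from elsewhere in the ceph-csi tree; the volOptions contents and the block-encryption fallback are assumptions made for the example, not code taken from the patches.

	// Sketch: assumes it is compiled inside the ceph-csi module so that the
	// internal util package is importable.
	package main

	import (
		"fmt"

		"github.com/ceph/ceph-csi/internal/util"
	)

	func main() {
		// Hypothetical volume parameters as they might arrive from a StorageClass.
		volOptions := map[string]string{
			"encrypted":      "true",
			"encryptionType": "file",
		}

		// An RBD caller would fall back to block encryption when the
		// "encryptionType" parameter is absent; CephFS would use file.
		encType := util.FetchEncryptionType(volOptions, util.EncryptionTypeBlock)
		fmt.Println(util.EncryptionTypeString(encType)) // prints "file"
	}
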
From 9b0cdf4dfaa1daa423202e3b826db2f116b3e735 Mon Sep 17 00:00:00 2001 From: Marcel Lauhoff Date: Fri, 11 Mar 2022 19:51:18 +0100 Subject: [PATCH 03/35] kms: Add GetSecret() to metadata KMS Add GetSecret() to allow direct access to passphrases without KDF and wrapping by a DEKStore. This will be used by fscrypt, which has its own KDF and wrapping. It will allow users to take a k8s secret, for example, and use that directly as a password in fscrypt. Signed-off-by: Marcel Lauhoff --- internal/kms/aws_metadata.go | 4 ++++ internal/kms/keyprotect.go | 4 ++++ internal/kms/kms.go | 15 +++++++++++++++ internal/kms/secretskms.go | 5 +++++ 4 files changed, 28 insertions(+) diff --git a/internal/kms/aws_metadata.go b/internal/kms/aws_metadata.go index f77bce6f3e4..f74a5f58194 100644 --- a/internal/kms/aws_metadata.go +++ b/internal/kms/aws_metadata.go @@ -226,3 +226,7 @@ func (kms *awsMetadataKMS) DecryptDEK(volumeID, encryptedDEK string) (string, er return string(result.Plaintext), nil } + +func (kms *awsMetadataKMS) GetSecret(volumeID string) (string, error) { + return "", ErrGetSecretUnsupported +} diff --git a/internal/kms/keyprotect.go b/internal/kms/keyprotect.go index fdc795053f5..d020c75b521 100644 --- a/internal/kms/keyprotect.go +++ b/internal/kms/keyprotect.go @@ -242,3 +242,7 @@ func (kms *keyProtectKMS) DecryptDEK(volumeID, encryptedDEK string) (string, err return string(result), nil } + +func (kms *keyProtectKMS) GetSecret(volumeID string) (string, error) { + return "", ErrGetSecretUnsupported +} diff --git a/internal/kms/kms.go b/internal/kms/kms.go index a42e6d0df82..82eadfd3e89 100644 --- a/internal/kms/kms.go +++ b/internal/kms/kms.go @@ -19,6 +19,7 @@ package kms import ( "context" "encoding/json" + "errors" "fmt" "os" @@ -53,6 +54,11 @@ const ( DefaultKMSType = "default" ) +var ( + ErrGetSecretUnsupported = errors.New("KMS does not support access to user provided secret") + ErrGetSecretIntegrated = errors.New("integrated DEK stores do not allow GetSecret") +) + // GetKMS returns an instance of Key Management System. // // - tenant is the owner of the Volume, used to fetch the Vault Token from the @@ -332,6 +338,11 @@ type EncryptionKMS interface { // function does not need to do anything except return the encyptedDEK // as it was received. DecryptDEK(volumeID, encyptedDEK string) (string, error) + + // GetSecret allows external key management systems to + // retrieve keys used in EncryptDEK / DecryptDEK to use them + // directly. Example: fscrypt uses this to unlock raw protectors + GetSecret(volumeID string) (string, error) } // DEKStoreType describes what DEKStore needs to be configured when using a @@ -377,6 +388,10 @@ func (i integratedDEK) DecryptDEK(volumeID, encyptedDEK string) (string, error) return encyptedDEK, nil } +func (i integratedDEK) GetSecret(volumeID string) (string, error) { + return "", ErrGetSecretIntegrated +} + // getKeys takes a map that uses strings for keys and returns a slice with the // keys. 
func getKeys(m map[string]interface{}) []string { diff --git a/internal/kms/secretskms.go b/internal/kms/secretskms.go index 6fb4793103f..4b4866c79f1 100644 --- a/internal/kms/secretskms.go +++ b/internal/kms/secretskms.go @@ -263,6 +263,11 @@ func (kms secretsMetadataKMS) DecryptDEK(volumeID, encryptedDEK string) (string, return string(dek), nil } +func (kms secretsMetadataKMS) GetSecret(volumeID string) (string, error) { + // use the passphrase from the secretKMS + return kms.secretsKMS.FetchDEK(volumeID) +} + // generateCipher returns a AEAD cipher based on a passphrase and salt // (volumeID). The cipher can then be used to encrypt/decrypt the DEK. func generateCipher(passphrase, salt string) (cipher.AEAD, error) { From 7f083c298f4dce3b65e6d11e1051f7bd206aa20b Mon Sep 17 00:00:00 2001 From: Marcel Lauhoff Date: Wed, 17 Aug 2022 16:08:48 +0200 Subject: [PATCH 04/35] kms: testing: add KMS test dummy registry Add registry similar to the providers one. This allows testers to add and use GetKMSTestDummy() to create stripped down provider instances suitable for use in unit tests. Signed-off-by: Marcel Lauhoff --- internal/kms/dummy.go | 71 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 71 insertions(+) create mode 100644 internal/kms/dummy.go diff --git a/internal/kms/dummy.go b/internal/kms/dummy.go new file mode 100644 index 00000000000..fdf7ac45921 --- /dev/null +++ b/internal/kms/dummy.go @@ -0,0 +1,71 @@ +/* +Copyright 2022 The Ceph-CSI Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kms + +import "encoding/base64" + +type TestDummyFunc func() EncryptionKMS + +type ProviderTest struct { + UniqueID string + CreateTestDummy TestDummyFunc +} + +type kmsTestProviderList struct { + providers map[string]ProviderTest +} + +var kmsTestManager = kmsTestProviderList{providers: map[string]ProviderTest{}} + +func RegisterTestProvider(provider ProviderTest) bool { + kmsTestManager.providers[provider.UniqueID] = provider + + return true +} + +func GetKMSTestDummy(kmsID string) EncryptionKMS { + provider, ok := kmsTestManager.providers[kmsID] + if !ok { + return nil + } + + return provider.CreateTestDummy() +} + +func GetKMSTestProvider() map[string]ProviderTest { + return kmsTestManager.providers +} + +func newDefaultTestDummy() EncryptionKMS { + return secretsKMS{passphrase: base64.URLEncoding.EncodeToString( + []byte("test dummy passphrase"))} +} + +func newSecretsMetadataTestDummy() EncryptionKMS { + smKMS := secretsMetadataKMS{} + smKMS.secretsKMS = secretsKMS{passphrase: base64.URLEncoding.EncodeToString( + []byte("test dummy passphrase"))} + + return smKMS +} + +var _ = RegisterTestProvider(ProviderTest{ + UniqueID: kmsTypeSecretsMetadata, + CreateTestDummy: newSecretsMetadataTestDummy, +}) + +var _ = RegisterTestProvider(ProviderTest{ + UniqueID: DefaultKMSType, + CreateTestDummy: newDefaultTestDummy, +}) From 2f57e5e45683154294bcac021c3306f2058cf134 Mon Sep 17 00:00:00 2001 From: Marcel Lauhoff Date: Fri, 29 Apr 2022 20:23:24 +0200 Subject: [PATCH 05/35] kms: Add basic GetSecret() test Add rudimentary test to ensure that we can get a valid passphrase from the GetSecret() feature Signed-off-by: Marcel Lauhoff --- internal/util/getsecret_test.go | 52 +++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 internal/util/getsecret_test.go diff --git a/internal/util/getsecret_test.go b/internal/util/getsecret_test.go new file mode 100644 index 00000000000..59e61e9920c --- /dev/null +++ b/internal/util/getsecret_test.go @@ -0,0 +1,52 @@ +/* +Copyright 2022 The Ceph-CSI Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "errors" + "testing" + + kmsapi "github.com/ceph/ceph-csi/internal/kms" + + "github.com/stretchr/testify/assert" +) + +func TestGetPassphraseFromKMS(t *testing.T) { + t.Parallel() + + for _, provider := range kmsapi.GetKMSTestProvider() { + if provider.CreateTestDummy == nil { + continue + } + kms := kmsapi.GetKMSTestDummy(provider.UniqueID) + assert.NotNil(t, kms) + + volEnc, err := NewVolumeEncryption(provider.UniqueID, kms) + if errors.Is(err, ErrDEKStoreNeeded) { + _, err = volEnc.KMS.GetSecret("") + if errors.Is(err, kmsapi.ErrGetSecretUnsupported) { + continue // currently unsupported by fscrypt integration + } + } + assert.NotNil(t, volEnc) + + if kms.RequiresDEKStore() == kmsapi.DEKStoreIntegrated { + continue + } + + secret, err := kms.GetSecret("") + assert.NoError(t, err, provider.UniqueID) + assert.NotEmpty(t, secret, provider.UniqueID) + } +} From 0551c0b23fc5e504d68b5a306268b0cab1729f30 Mon Sep 17 00:00:00 2001 From: Marcel Lauhoff Date: Fri, 27 May 2022 20:03:32 +0200 Subject: [PATCH 06/35] rbd: Rename encryption to blockEncryption prep for fscrypt In preparation of fscrypt support for RBD filesystems, rename block encryption related function to include the word 'block'. Add struct fields and IsFileEncrypted. Signed-off-by: Marcel Lauhoff --- internal/rbd/controllerserver.go | 2 +- internal/rbd/encryption.go | 47 ++++++++++++++++++-------------- internal/rbd/nodeserver.go | 16 +++++------ internal/rbd/rbd_attach.go | 2 +- internal/rbd/rbd_journal.go | 12 ++++---- internal/rbd/rbd_util.go | 32 +++++++++++++--------- internal/rbd/snapshot.go | 2 +- 7 files changed, 62 insertions(+), 51 deletions(-) diff --git a/internal/rbd/controllerserver.go b/internal/rbd/controllerserver.go index bf57b8857f1..0ed109ae484 100644 --- a/internal/rbd/controllerserver.go +++ b/internal/rbd/controllerserver.go @@ -1560,7 +1560,7 @@ func (cs *ControllerServer) ControllerExpandVolume( // 2. Block VolumeMode with Encryption // Hence set nodeExpansion flag based on VolumeMode and Encryption status nodeExpansion := true - if req.GetVolumeCapability().GetBlock() != nil && !rbdVol.isEncrypted() { + if req.GetVolumeCapability().GetBlock() != nil && !rbdVol.isBlockEncrypted() { nodeExpansion = false } diff --git a/internal/rbd/encryption.go b/internal/rbd/encryption.go index 4afd8b4f766..60522504f2e 100644 --- a/internal/rbd/encryption.go +++ b/internal/rbd/encryption.go @@ -93,16 +93,21 @@ func (ri *rbdImage) ensureEncryptionMetadataSet(status rbdEncryptionState) error return nil } -// isEncrypted returns `true` if the rbdImage is (or needs to be) encrypted. -func (ri *rbdImage) isEncrypted() bool { - return ri.encryption != nil +// isBlockEncrypted returns `true` if the rbdImage is (or needs to be) encrypted. +func (ri *rbdImage) isBlockEncrypted() bool { + return ri.blockEncryption != nil } -// setupEncryption configures the metadata of the RBD image for encryption: +// isBlockDeviceEncrypted returns `true` if the filesystem on the rbdImage is (or needs to be) encrypted. +func (ri *rbdImage) isFileEncrypted() bool { + return ri.fileEncryption != nil +} + +// setupBlockEncryption configures the metadata of the RBD image for encryption: // - the Data-Encryption-Key (DEK) will be generated stored for use by the KMS; // - the RBD image will be marked to support encryption in its metadata. 
-func (ri *rbdImage) setupEncryption(ctx context.Context) error { - err := ri.encryption.StoreNewCryptoPassphrase(ri.VolID, encryptionPassphraseSize) +func (ri *rbdImage) setupBlockEncryption(ctx context.Context) error { + err := ri.blockEncryption.StoreNewCryptoPassphrase(ri.VolID, encryptionPassphraseSize) if err != nil { log.ErrorLog(ctx, "failed to save encryption passphrase for "+ "image %s: %s", ri, err) @@ -132,7 +137,7 @@ func (ri *rbdImage) setupEncryption(ctx context.Context) error { // (Usecase: Restoring snapshot into a storageclass with different encryption config). func (ri *rbdImage) copyEncryptionConfig(cp *rbdImage, copyOnlyPassphrase bool) error { // nothing to do if parent image is not encrypted. - if !ri.isEncrypted() { + if !ri.isBlockEncrypted() { return nil } @@ -142,21 +147,21 @@ func (ri *rbdImage) copyEncryptionConfig(cp *rbdImage, copyOnlyPassphrase bool) } // get the unencrypted passphrase - passphrase, err := ri.encryption.GetCryptoPassphrase(ri.VolID) + passphrase, err := ri.blockEncryption.GetCryptoPassphrase(ri.VolID) if err != nil { return fmt.Errorf("failed to fetch passphrase for %q: %w", ri, err) } if !copyOnlyPassphrase { - cp.encryption, err = util.NewVolumeEncryption(ri.encryption.GetID(), ri.encryption.KMS) + cp.blockEncryption, err = util.NewVolumeEncryption(ri.blockEncryption.GetID(), ri.blockEncryption.KMS) if errors.Is(err, util.ErrDEKStoreNeeded) { - cp.encryption.SetDEKStore(cp) + cp.blockEncryption.SetDEKStore(cp) } } // re-encrypt the plain passphrase for the cloned volume - err = cp.encryption.StoreCryptoPassphrase(cp.VolID, passphrase) + err = cp.blockEncryption.StoreCryptoPassphrase(cp.VolID, passphrase) if err != nil { return fmt.Errorf("failed to store passphrase for %q: %w", cp, err) @@ -180,12 +185,12 @@ func (ri *rbdImage) copyEncryptionConfig(cp *rbdImage, copyOnlyPassphrase bool) // repairEncryptionConfig checks the encryption state of the current rbdImage, // and makes sure that the destination rbdImage has the same configuration. 
func (ri *rbdImage) repairEncryptionConfig(dest *rbdImage) error { - if !ri.isEncrypted() { + if !ri.isBlockEncrypted() { return nil } // if ri is encrypted, copy its configuration in case it is missing - if !dest.isEncrypted() { + if !dest.isBlockEncrypted() { // dest needs to be connected to the cluster, otherwise it will // not be possible to write any metadata if dest.conn == nil { @@ -199,7 +204,7 @@ func (ri *rbdImage) repairEncryptionConfig(dest *rbdImage) error { } func (ri *rbdImage) encryptDevice(ctx context.Context, devicePath string) error { - passphrase, err := ri.encryption.GetCryptoPassphrase(ri.VolID) + passphrase, err := ri.blockEncryption.GetCryptoPassphrase(ri.VolID) if err != nil { log.ErrorLog(ctx, "failed to get crypto passphrase for %s: %v", ri, err) @@ -225,7 +230,7 @@ func (ri *rbdImage) encryptDevice(ctx context.Context, devicePath string) error } func (rv *rbdVolume) openEncryptedDevice(ctx context.Context, devicePath string) (string, error) { - passphrase, err := rv.encryption.GetCryptoPassphrase(rv.VolID) + passphrase, err := rv.blockEncryption.GetCryptoPassphrase(rv.VolID) if err != nil { log.ErrorLog(ctx, "failed to get passphrase for encrypted device %s: %v", rv, err) @@ -264,7 +269,7 @@ func (ri *rbdImage) initKMS(ctx context.Context, volOptions, credentials map[str return nil } - err = ri.configureEncryption(kmsID, credentials) + err = ri.configureBlockDeviceEncryption(kmsID, credentials) if err != nil { return fmt.Errorf("invalid encryption kms configuration: %w", err) } @@ -294,20 +299,20 @@ func (ri *rbdImage) ParseEncryptionOpts( return kmsID, nil } -// configureEncryption sets up the VolumeEncryption for this rbdImage. Once -// configured, use isEncrypted() to see if the volume supports encryption. -func (ri *rbdImage) configureEncryption(kmsID string, credentials map[string]string) error { +// configureBlockDeviceEncryption sets up the VolumeEncryption for this rbdImage. Once +// configured, use isBlockEncrypted() to see if the volume supports block encryption. 
+func (ri *rbdImage) configureBlockEncryption(kmsID string, credentials map[string]string) error { kms, err := kmsapi.GetKMS(ri.Owner, kmsID, credentials) if err != nil { return err } - ri.encryption, err = util.NewVolumeEncryption(kmsID, kms) + ri.blockEncryption, err = util.NewVolumeEncryption(kmsID, kms) // if the KMS can not store the DEK itself, we'll store it in the // metadata of the RBD image itself if errors.Is(err, util.ErrDEKStoreNeeded) { - ri.encryption.SetDEKStore(ri) + ri.blockEncryption.SetDEKStore(ri) } return nil diff --git a/internal/rbd/nodeserver.go b/internal/rbd/nodeserver.go index 9b8c17fa3db..13ae406633f 100644 --- a/internal/rbd/nodeserver.go +++ b/internal/rbd/nodeserver.go @@ -55,8 +55,8 @@ type stageTransaction struct { isStagePathCreated bool // isMounted represents if the volume was mounted or not isMounted bool - // isEncrypted represents if the volume was encrypted or not - isEncrypted bool + // isBlockEncrypted represents if the volume was encrypted or not + isBlockEncrypted bool // devicePath represents the path where rbd device is mapped devicePath string } @@ -425,12 +425,12 @@ func (ns *NodeServer) stageTransaction( } } - if volOptions.isEncrypted() { + if volOptions.isBlockEncrypted() { devicePath, err = ns.processEncryptedDevice(ctx, volOptions, devicePath) if err != nil { return transaction, err } - transaction.isEncrypted = true + transaction.isBlockEncrypted = true } stagingTargetPath := getStagingTargetPath(req) @@ -475,13 +475,13 @@ func resizeNodeStagePath(ctx context.Context, var ok bool // if its a non encrypted block device we dont need any expansion - if isBlock && !transaction.isEncrypted { + if isBlock && !transaction.isBlockEncrypted { return nil } resizer := mount.NewResizeFs(utilexec.New()) - if transaction.isEncrypted { + if transaction.isBlockEncrypted { devicePath, err = resizeEncryptedDevice(ctx, volID, stagingTargetPath, devicePath) if err != nil { return status.Error(codes.Internal, err.Error()) @@ -611,7 +611,7 @@ func (ns *NodeServer) undoStagingTransaction( // Unmapping rbd device if transaction.devicePath != "" { - err = detachRBDDevice(ctx, transaction.devicePath, volID, volOptions.UnmapOptions, transaction.isEncrypted) + err = detachRBDDevice(ctx, transaction.devicePath, volID, volOptions.UnmapOptions, transaction.isBlockEncrypted) if err != nil { log.ErrorLog( ctx, @@ -1146,7 +1146,7 @@ func (ns *NodeServer) processEncryptedDevice( // CreateVolume. // Use the same setupEncryption() as CreateVolume does, and // continue with the common process to crypt-format the device. - err = volOptions.setupEncryption(ctx) + err = volOptions.setupBlockEncryption(ctx) if err != nil { log.ErrorLog(ctx, "failed to setup encryption for rbd"+ "image %s: %v", imageSpec, err) diff --git a/internal/rbd/rbd_attach.go b/internal/rbd/rbd_attach.go index ad326ac8181..1af3f065bc2 100644 --- a/internal/rbd/rbd_attach.go +++ b/internal/rbd/rbd_attach.go @@ -473,7 +473,7 @@ func createPath(ctx context.Context, volOpt *rbdVolume, device string, cr *util. 
imageOrDeviceSpec: imagePath, isImageSpec: true, isNbd: isNbd, - encrypted: volOpt.isEncrypted(), + encrypted: volOpt.isBlockEncrypted(), volumeID: volOpt.VolID, unmapOptions: volOpt.UnmapOptions, logDir: volOpt.LogDir, diff --git a/internal/rbd/rbd_journal.go b/internal/rbd/rbd_journal.go index eadfa99e7ce..e0e9914643a 100644 --- a/internal/rbd/rbd_journal.go +++ b/internal/rbd/rbd_journal.go @@ -246,8 +246,8 @@ func (rv *rbdVolume) Exists(ctx context.Context, parentVol *rbdVolume) (bool, er } kmsID := "" - if rv.isEncrypted() { - kmsID = rv.encryption.GetID() + if rv.isBlockEncrypted() { + kmsID = rv.blockEncryption.GetID() } j, err := volJournal.Connect(rv.Monitors, rv.RadosNamespace, rv.conn.Creds) @@ -387,8 +387,8 @@ func reserveSnap(ctx context.Context, rbdSnap *rbdSnapshot, rbdVol *rbdVolume, c defer j.Destroy() kmsID := "" - if rbdVol.isEncrypted() { - kmsID = rbdVol.encryption.GetID() + if rbdVol.isBlockEncrypted() { + kmsID = rbdVol.blockEncryption.GetID() } rbdSnap.ReservedID, rbdSnap.RbdSnapName, err = j.ReserveName( @@ -461,8 +461,8 @@ func reserveVol(ctx context.Context, rbdVol *rbdVolume, rbdSnap *rbdSnapshot, cr } kmsID := "" - if rbdVol.isEncrypted() { - kmsID = rbdVol.encryption.GetID() + if rbdVol.isBlockEncrypted() { + kmsID = rbdVol.blockEncryption.GetID() } j, err := volJournal.Connect(rbdVol.Monitors, rbdVol.RadosNamespace, cr) diff --git a/internal/rbd/rbd_util.go b/internal/rbd/rbd_util.go index d32c4d39136..ff2e682b413 100644 --- a/internal/rbd/rbd_util.go +++ b/internal/rbd/rbd_util.go @@ -118,6 +118,7 @@ type rbdImage struct { ParentPool string // Cluster name ClusterName string + // Owner is the creator (tenant, Kubernetes Namespace) of the volume Owner string @@ -130,9 +131,14 @@ type rbdImage struct { ObjectSize uint64 ImageFeatureSet librbd.FeatureSet - // encryption provides access to optional VolumeEncryption functions - encryption *util.VolumeEncryption + + // blockEncryption provides access to optional VolumeEncryption functions (e.g LUKS) + blockEncryption *util.VolumeEncryption + // fileEncryption provides access to optional VolumeEncryption functions (e.g fscrypt) + fileEncryption *util.VolumeEncryption + CreatedAt *timestamp.Timestamp + // conn is a connection to the Ceph cluster obtained from a ConnPool conn *util.ClusterConnection // an opened IOContext, call .openIoctx() before using @@ -384,8 +390,8 @@ func (ri *rbdImage) Destroy() { if ri.conn != nil { ri.conn.Destroy() } - if ri.isEncrypted() { - ri.encryption.Destroy() + if ri.isBlockEncrypted() { + ri.blockEncryption.Destroy() } } @@ -438,8 +444,8 @@ func createImage(ctx context.Context, pOpts *rbdVolume, cr *util.Credentials) er return fmt.Errorf("failed to create rbd image: %w", err) } - if pOpts.isEncrypted() { - err = pOpts.setupEncryption(ctx) + if pOpts.isBlockEncrypted() { + err = pOpts.setupBlockEncryption(ctx) if err != nil { return fmt.Errorf("failed to setup encryption for image %s: %w", pOpts, err) } @@ -624,9 +630,9 @@ func (ri *rbdImage) deleteImage(ctx context.Context) error { return err } - if ri.isEncrypted() { + if ri.isBlockEncrypted() { log.DebugLog(ctx, "rbd: going to remove DEK for %q", ri) - if err = ri.encryption.RemoveDEK(ri.VolID); err != nil { + if err = ri.blockEncryption.RemoveDEK(ri.VolID); err != nil { log.WarningLog(ctx, "failed to clean the passphrase for volume %s: %s", ri.VolID, err) } } @@ -1009,7 +1015,7 @@ func genSnapFromSnapID( } if imageAttributes.KmsID != "" { - err = rbdSnap.configureEncryption(imageAttributes.KmsID, secrets) + err = 
rbdSnap.configureBlockEncryption(imageAttributes.KmsID, secrets) if err != nil { return fmt.Errorf("failed to configure encryption for "+ "%q: %w", rbdSnap, err) @@ -1104,7 +1110,7 @@ func generateVolumeFromVolumeID( rbdVol.Owner = imageAttributes.Owner if imageAttributes.KmsID != "" { - err = rbdVol.configureEncryption(imageAttributes.KmsID, secrets) + err = rbdVol.configureBlockEncryption(imageAttributes.KmsID, secrets) if err != nil { return rbdVol, err } @@ -1681,7 +1687,7 @@ func stashRBDImageMetadata(volOptions *rbdVolume, metaDataPath string) error { Pool: volOptions.Pool, RadosNamespace: volOptions.RadosNamespace, ImageName: volOptions.RbdImageName, - Encrypted: volOptions.isEncrypted(), + Encrypted: volOptions.isBlockEncrypted(), UnmapOptions: volOptions.UnmapOptions, } @@ -1962,10 +1968,10 @@ func (ri *rbdImage) getOrigSnapName(snapID uint64) (string, error) { func (ri *rbdImage) isCompatibleEncryption(dst *rbdImage) error { switch { - case ri.isEncrypted() && !dst.isEncrypted(): + case ri.isBlockEncrypted() && !dst.isBlockEncrypted(): return fmt.Errorf("cannot create unencrypted volume from encrypted volume %q", ri) - case !ri.isEncrypted() && dst.isEncrypted(): + case !ri.isBlockEncrypted() && dst.isBlockEncrypted(): return fmt.Errorf("cannot create encrypted volume from unencrypted volume %q", ri) } diff --git a/internal/rbd/snapshot.go b/internal/rbd/snapshot.go index 089946757d2..c5f0a0bdf7f 100644 --- a/internal/rbd/snapshot.go +++ b/internal/rbd/snapshot.go @@ -111,7 +111,7 @@ func generateVolFromSnap(rbdSnap *rbdSnapshot) *rbdVolume { // copyEncryptionConfig cannot be used here because the volume and the // snapshot will have the same volumeID which cases the panic in // copyEncryptionConfig function. - vol.encryption = rbdSnap.encryption + vol.blockEncryption = rbdSnap.blockEncryption return vol } From 8827105beb556eb698cbcef25f722007fa854db8 Mon Sep 17 00:00:00 2001 From: Marcel Lauhoff Date: Wed, 20 Jul 2022 21:50:15 +0200 Subject: [PATCH 07/35] journal: Store encryptionType in Config struct Add encryptionType next to kmsID to support both block and file encryption. 
Signed-off-by: Marcel Lauhoff --- internal/journal/voljournal.go | 36 ++++++++++++++++++++++++++-------- 1 file changed, 28 insertions(+), 8 deletions(-) diff --git a/internal/journal/voljournal.go b/internal/journal/voljournal.go index 174bb72b472..988bf71cc3b 100644 --- a/internal/journal/voljournal.go +++ b/internal/journal/voljournal.go @@ -149,6 +149,9 @@ type Config struct { // encryptKMS in which encryption passphrase was saved, default is no encryption encryptKMSKey string + // encryptKMS in which encryption passphrase was saved, default is no encryption + encryptionType string + // ownerKey is used to identify the owner of the volume, can be used with some KMS configurations ownerKey string @@ -172,6 +175,7 @@ func NewCSIVolumeJournal(suffix string) *Config { namespace: "", csiImageIDKey: "csi.imageid", encryptKMSKey: "csi.volume.encryptKMS", + encryptionType: "csi.volume.encryptionType", ownerKey: "csi.volume.owner", backingSnapshotIDKey: "csi.volume.backingsnapshotid", commonPrefix: "csi.", @@ -191,6 +195,7 @@ func NewCSISnapshotJournal(suffix string) *Config { namespace: "", csiImageIDKey: "csi.imageid", encryptKMSKey: "csi.volume.encryptKMS", + encryptionType: "csi.volume.encryptionType", ownerKey: "csi.volume.owner", commonPrefix: "csi.", } @@ -280,6 +285,7 @@ Return values: */ func (conn *Connection) CheckReservation(ctx context.Context, journalPool, reqName, namePrefix, snapParentName, kmsConfig string, + encryptionType util.EncryptionType, ) (*ImageData, error) { var ( snapSource bool @@ -377,6 +383,14 @@ func (conn *Connection) CheckReservation(ctx context.Context, } } + if encryptionType != util.EncryptionTypeInvalid { + if savedImageAttributes.EncryptionType != encryptionType { + return nil, fmt.Errorf("internal state inconsistent, omap encryption type"+ + " mismatch, request KMS (%s) volume UUID (%s) volume omap KMS (%d)", + kmsConfig, objUUID, savedImageAttributes.EncryptionType) + } + } + // TODO: skipping due to excessive poolID to poolname call, also this should never happen! // check if journal pool points back to the passed in journal pool // if savedJournalPoolID != journalPoolID { @@ -530,6 +544,7 @@ Input arguments: - namePrefix: Prefix to use when generating the image/subvolume name (suffix is an auto-generated UUID) - parentName: Name of the parent image/subvolume if reservation is for a snapshot (optional) - kmsConf: Name of the key management service used to encrypt the image (optional) + - encryptionType: Type of encryption used when kmsConf is set (optional) - volUUID: UUID need to be reserved instead of auto-generating one (this is useful for mirroring and metro-DR) - owner: the owner of the volume (optional) - backingSnapshotID: ID of the snapshot on which the CephFS snapshot-backed volume is based (optional) @@ -544,6 +559,7 @@ func (conn *Connection) ReserveName(ctx context.Context, imagePool string, imagePoolID int64, reqName, namePrefix, parentName, kmsConf, volUUID, owner, backingSnapshotID string, + encryptionType util.EncryptionType, ) (string, string, error) { // TODO: Take in-arg as ImageAttributes? 
var ( @@ -624,6 +640,7 @@ func (conn *Connection) ReserveName(ctx context.Context, // Update UUID directory to store encryption values if kmsConf != "" { omapValues[cj.encryptKMSKey] = kmsConf + omapValues[cj.encryptionType] = util.EncryptionTypeString(encryptionType) } // if owner is passed, set it in the UUID directory too @@ -660,14 +677,15 @@ func (conn *Connection) ReserveName(ctx context.Context, // ImageAttributes contains all CSI stored image attributes, typically as OMap keys. type ImageAttributes struct { - RequestName string // Contains the request name for the passed in UUID - SourceName string // Contains the parent image name for the passed in UUID, if it is a snapshot - ImageName string // Contains the image or subvolume name for the passed in UUID - KmsID string // Contains encryption KMS, if it is an encrypted image - Owner string // Contains the owner to be used in combination with KmsID (for some KMS) - ImageID string // Contains the image id - JournalPoolID int64 // Pool ID of the CSI journal pool, stored in big endian format (on-disk data) - BackingSnapshotID string // ID of the snapshot on which the CephFS snapshot-backed volume is based + RequestName string // Contains the request name for the passed in UUID + SourceName string // Contains the parent image name for the passed in UUID, if it is a snapshot + ImageName string // Contains the image or subvolume name for the passed in UUID + KmsID string // Contains encryption KMS, if it is an encrypted image + EncryptionType util.EncryptionType // Type of encryption used, if image encrypted + Owner string // Contains the owner to be used in combination with KmsID (for some KMS) + ImageID string // Contains the image id + JournalPoolID int64 // Pool ID of the CSI journal pool, stored in big endian format (on-disk data) + BackingSnapshotID string // ID of the snapshot on which the CephFS snapshot-backed volume is based } // GetImageAttributes fetches all keys and their values, from a UUID directory, returning ImageAttributes structure. @@ -692,6 +710,7 @@ func (conn *Connection) GetImageAttributes( cj.csiNameKey, cj.csiImageKey, cj.encryptKMSKey, + cj.encryptionType, cj.csiJournalPool, cj.cephSnapSourceKey, cj.csiImageIDKey, @@ -711,6 +730,7 @@ func (conn *Connection) GetImageAttributes( var found bool imageAttributes.RequestName = values[cj.csiNameKey] imageAttributes.KmsID = values[cj.encryptKMSKey] + imageAttributes.EncryptionType = util.ParseEncryptionType(values[cj.encryptionType]) imageAttributes.Owner = values[cj.ownerKey] imageAttributes.ImageID = values[cj.csiImageIDKey] imageAttributes.BackingSnapshotID = values[cj.backingSnapshotIDKey] From 259b4d006cc8681f2304ebc76475a1342064c3dc Mon Sep 17 00:00:00 2001 From: Marcel Lauhoff Date: Fri, 12 Aug 2022 16:30:35 +0200 Subject: [PATCH 08/35] fscrypt: fscrypt integration Integrate google/fscrypt into Ceph CSI KMS and encryption setup. Adds dependencies to google/fscrypt and pkg/xattr. Be as generic as possible to support integration with both RBD and Ceph FS. Add the following public functions: InitializeNode: per-node initialization steps. Must be called before Unlock at least once. Unlock: All steps necessary to unlock an encrypted directory including setting it up initially. 
IsDirectoryUnlocked: Test if directory is really encrypted Signed-off-by: Marcel Lauhoff --- go.mod | 2 + go.sum | 7 + internal/util/fscrypt/fscrypt.go | 382 +++++++++++++++++++++++++++++++ vendor/modules.txt | 14 ++ 4 files changed, 405 insertions(+) create mode 100644 internal/util/fscrypt/fscrypt.go diff --git a/go.mod b/go.mod index e47f1b7d18d..badf4dde544 100644 --- a/go.mod +++ b/go.mod @@ -15,6 +15,7 @@ require ( github.com/gemalto/kmip-go v0.0.8-0.20220721195433-3fe83e2d3f26 github.com/golang/protobuf v1.5.2 github.com/google/uuid v1.3.0 + github.com/google/fscrypt v0.3.3 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/hashicorp/vault/api v1.7.2 @@ -23,6 +24,7 @@ require ( github.com/libopenstorage/secrets v0.0.0-20210908194121-a1d19aa9713a github.com/onsi/ginkgo/v2 v2.1.6 github.com/onsi/gomega v1.20.1 + github.com/pkg/xattr v0.4.7 github.com/prometheus/client_golang v1.12.2 github.com/stretchr/testify v1.8.0 golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd diff --git a/go.sum b/go.sum index 2552be64479..34b701e0ae0 100644 --- a/go.sum +++ b/go.sum @@ -485,6 +485,8 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/cadvisor v0.45.0/go.mod h1:vsMT3Uv2XjQ8M7WUtKARV74mU/HN64C4XtM1bJhUKcU= github.com/google/cel-go v0.12.4/go.mod h1:Av7CU6r6X3YmcHR9GXqVDaEJYfEtSxl6wvIjUQTriCw= +github.com/google/fscrypt v0.3.3 h1:qwx9OCR/xZE68VGr/r0/yugFhlGpIOGsH9JHrttP7vc= +github.com/google/fscrypt v0.3.3/go.mod h1:H1JHtH8BVe0dYNhzx1Ztkn3azQ0OBdoOmM828vEWAXc= github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -991,6 +993,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/xattr v0.4.7 h1:XoA3KzmFvyPlH4RwX5eMcgtzcaGBaSvgt3IoFQfbrmQ= +github.com/pkg/xattr v0.4.7/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/portworx/dcos-secrets v0.0.0-20180616013705-8e8ec3f66611/go.mod h1:4hklRW/4DQpLqkcXcjtNprbH2tz/sJaNtqinfPWl/LA= @@ -1143,6 +1147,7 @@ github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYp github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= +github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad/go.mod h1:Hy8o65+MXnS6EwGElrSRjUzQDLXreJlzYLlWiHtt8hM= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xi2/xz 
v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= @@ -1503,6 +1508,7 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9w golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210422114643-f5beecf764ed/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1551,6 +1557,7 @@ golang.org/x/tools v0.0.0-20190718200317-82a3ea8a504c/go.mod h1:jcCCGcm9btYwXyDq golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191025023517-2077df36852e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= diff --git a/internal/util/fscrypt/fscrypt.go b/internal/util/fscrypt/fscrypt.go new file mode 100644 index 00000000000..d339258ed3e --- /dev/null +++ b/internal/util/fscrypt/fscrypt.go @@ -0,0 +1,382 @@ +/* +Copyright 2022 The Ceph-CSI Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fscrypt + +/* +#include +*/ +import "C" + +import ( + "context" + "errors" + "fmt" + "os" + "os/user" + "path" + "time" + "unsafe" + + fscryptactions "github.com/google/fscrypt/actions" + fscryptcrypto "github.com/google/fscrypt/crypto" + fscryptfilesystem "github.com/google/fscrypt/filesystem" + fscryptmetadata "github.com/google/fscrypt/metadata" + "github.com/pkg/xattr" + "golang.org/x/sys/unix" + + "github.com/ceph/ceph-csi/internal/kms" + "github.com/ceph/ceph-csi/internal/util" + "github.com/ceph/ceph-csi/internal/util/log" +) + +const ( + FscryptHashingTimeTarget = 1 * time.Second + FscryptProtectorPrefix = "ceph-csi" + FscryptSubdir = "ceph-csi-encrypted" + encryptionPassphraseSize = 64 +) + +func AppendEncyptedSubdirectory(dir string) string { + return path.Join(dir, FscryptSubdir) +} + +// getPassphrase returns the passphrase from the configured Ceph CSI KMS to be used as a protector key in fscrypt. 
+func getPassphrase(ctx context.Context, encryption util.VolumeEncryption, volID string) (string, error) { + var ( + passphrase string + err error + ) + + switch encryption.KMS.RequiresDEKStore() { + case kms.DEKStoreIntegrated: + passphrase, err = encryption.GetCryptoPassphrase(volID) + if err != nil { + log.ErrorLog(ctx, "fscrypt: failed to get passphrase from KMS: %v", err) + + return "", err + } + case kms.DEKStoreMetadata: + passphrase, err = encryption.KMS.GetSecret(volID) + if err != nil { + log.ErrorLog(ctx, "fscrypt: failed to GetSecret: %v", err) + + return "", err + } + } + + return passphrase, nil +} + +// createKeyFuncFromVolumeEncryption returns an fscrypt key function returning +// encryption keys form a VolumeEncryption struct. +func createKeyFuncFromVolumeEncryption( + ctx context.Context, + encryption util.VolumeEncryption, + volID string, +) (func(fscryptactions.ProtectorInfo, bool) (*fscryptcrypto.Key, error), error) { + passphrase, err := getPassphrase(ctx, encryption, volID) + if err != nil { + return nil, err + } + + keyFunc := func(info fscryptactions.ProtectorInfo, retry bool) (*fscryptcrypto.Key, error) { + key, err := fscryptcrypto.NewBlankKey(32) + copy(key.Data(), passphrase) + + return key, err + } + + return keyFunc, nil +} + +// unlockExisting tries to unlock an already set up fscrypt directory using keys from Ceph CSI. +func unlockExisting( + ctx context.Context, + fscryptContext *fscryptactions.Context, + encryptedPath string, protectorName string, + keyFn func(fscryptactions.ProtectorInfo, bool) (*fscryptcrypto.Key, error), +) error { + var err error + + policy, err := fscryptactions.GetPolicyFromPath(fscryptContext, encryptedPath) + if err != nil { + log.ErrorLog(ctx, "fscrypt: policy get failed %v", err) + + return err + } + + optionFn := func(policyDescriptor string, options []*fscryptactions.ProtectorOption) (int, error) { + for idx, option := range options { + if option.Name() == protectorName { + return idx, nil + } + } + + return 0, &fscryptactions.ErrNotProtected{PolicyDescriptor: policyDescriptor, ProtectorDescriptor: protectorName} + } + + if err = policy.Unlock(optionFn, keyFn); err != nil { + log.ErrorLog(ctx, "fscrypt: unlock with protector error: %v", err) + + return err + } + + defer func() { + err = policy.Lock() + if err != nil { + log.ErrorLog(ctx, "fscrypt: failed to lock policy after use: %v", err) + } + }() + + if err = policy.Provision(); err != nil { + log.ErrorLog(ctx, "fscrypt: provision fail %v", err) + + return err + } + + log.DebugLog(ctx, "fscrypt protector unlock: %s %+v", protectorName, policy) + + return nil +} + +func initializeAndUnlock( + ctx context.Context, + fscryptContext *fscryptactions.Context, + encryptedPath string, protectorName string, + keyFn func(fscryptactions.ProtectorInfo, bool) (*fscryptcrypto.Key, error), +) error { + var owner *user.User + var err error + + if err = os.Mkdir(encryptedPath, 0o755); err != nil { + return err + } + + protector, err := fscryptactions.CreateProtector(fscryptContext, protectorName, keyFn, owner) + if err != nil { + log.ErrorLog(ctx, "fscrypt: protector name=%s create failed: %v. 
reverting.", protectorName, err) + if revertErr := protector.Revert(); revertErr != nil { + return revertErr + } + + return err + } + + if err = protector.Unlock(keyFn); err != nil { + return err + } + log.DebugLog(ctx, "fscrypt protector unlock: %+v", protector) + + var policy *fscryptactions.Policy + if policy, err = fscryptactions.CreatePolicy(fscryptContext, protector); err != nil { + return err + } + defer func() { + err = policy.Lock() + if err != nil { + log.ErrorLog(ctx, "fscrypt: failed to lock policy after init: %w") + err = policy.Revert() + if err != nil { + log.ErrorLog(ctx, "fscrypt: failed to revert policy after failed lock: %w") + } + } + }() + + if err = policy.UnlockWithProtector(protector); err != nil { + log.ErrorLog(ctx, "fscrypt: Failed to unlock policy: %v", err) + + return err + } + + if err = policy.Provision(); err != nil { + log.ErrorLog(ctx, "fscrypt: Failed to provision policy: %v", err) + + return err + } + + if err = policy.Apply(encryptedPath); err != nil { + log.ErrorLog(ctx, "fscrypt: Failed to apply protector (see also kernel log): %w", err) + if err = policy.Deprovision(false); err != nil { + log.ErrorLog(ctx, "fscrypt: Policy cleanup response to failing apply failed: %w", err) + } + + return err + } + + return nil +} + +// getInodeEncryptedAttribute returns the inode's encrypt attribute similar to lsattr(1) +func getInodeEncryptedAttribute(p string) (bool, error) { + file, err := os.Open(p) + if err != nil { + return false, err + } + defer file.Close() + + var attr int + _, _, errno := unix.Syscall(unix.SYS_IOCTL, file.Fd(), unix.FS_IOC_GETFLAGS, + uintptr(unsafe.Pointer(&attr))) + if errno != 0 { + return false, fmt.Errorf("error calling ioctl_iflags: %w", errno) + } + + if attr&C.FS_ENCRYPT_FL != 0 { + return true, nil + } + + return false, nil +} + +// IsDirectoryUnlockedFscrypt checks if a directory is an unlocked fscrypted directory. +func IsDirectoryUnlocked(directoryPath, filesystem string) error { + if _, err := fscryptmetadata.GetPolicy(directoryPath); err != nil { + return fmt.Errorf("no fscrypt policy set on directory %q: %w", directoryPath, err) + } + + switch filesystem { + case "ceph": + _, err := xattr.Get(directoryPath, "ceph.fscrypt.auth") + if err != nil { + return fmt.Errorf("error reading ceph.fscrypt.auth xattr on %q: %w", directoryPath, err) + } + default: + encrypted, err := getInodeEncryptedAttribute(directoryPath) + if err != nil { + return err + } + + if !encrypted { + return fmt.Errorf("path %s does not have the encrypted inode flag set. Encryption init must have failed", + directoryPath) + } + } + + return nil +} + +// InitializeNode performs once per nodeserver initialization +// required by the fscrypt library. Creates /etc/fscrypt.conf. +func InitializeNode(ctx context.Context) error { + err := fscryptactions.CreateConfigFile(FscryptHashingTimeTarget, 2) + if err != nil { + existsError := &fscryptactions.ErrConfigFileExists{} + if errors.As(err, &existsError) { + log.ErrorLog(ctx, "fscrypt: config file %q already exists. Skipping fscrypt node setup", + existsError.Path) + + return nil + } + + return fmt.Errorf("fscrypt node init failed to create node configuration (/etc/fscrypt.conf): %w", + err) + } + + return nil +} + +// FscryptUnlock unlocks possilby creating fresh fscrypt metadata +// iff a volume is encrypted. Otherwise return immediately Calling +// this function requires that InitializeFscrypt ran once on this node. 
+func Unlock( + ctx context.Context, + volEncryption *util.VolumeEncryption, + stagingTargetPath string, volID string, +) error { + fscryptContext, err := fscryptactions.NewContextFromMountpoint(stagingTargetPath, nil) + if err != nil { + log.ErrorLog(ctx, "fscrypt: failed to create context from mountpoint %v: %w", stagingTargetPath) + + return err + } + + fscryptContext.Config.UseFsKeyringForV1Policies = true + + log.DebugLog(ctx, "fscrypt context: %+v", fscryptContext) + + if err = fscryptContext.Mount.CheckSupport(); err != nil { + log.ErrorLog(ctx, "fscrypt: filesystem mount %s does not support fscrypt", fscryptContext.Mount) + + return err + } + + // A proper set up fscrypy directory requires metadata and a kernel policy: + + // 1. Do we have a metadata directory (.fscrypt) set up? + metadataDirExists := false + if err = fscryptContext.Mount.Setup(0o755); err != nil { + alreadySetupErr := &fscryptfilesystem.ErrAlreadySetup{} + if errors.As(err, &alreadySetupErr) { + log.DebugLog(ctx, "fscrypt: metadata directory %q already set up", alreadySetupErr.Mount.Path) + metadataDirExists = true + } else { + log.ErrorLog(ctx, "fscrypt: mount setup failed: %v", err) + + return err + } + } + + encryptedPath := path.Join(stagingTargetPath, FscryptSubdir) + kernelPolicyExists := false + // 2. Ask the kernel if the directory has an fscrypt policy in place. + if _, err = fscryptmetadata.GetPolicy(encryptedPath); err == nil { // encrypted directory already set up + kernelPolicyExists = true + } + + if metadataDirExists != kernelPolicyExists { + return fmt.Errorf("fscrypt: unsupported state metadata=%t kernel_policy=%t", + metadataDirExists, kernelPolicyExists) + } + + keyFn, err := createKeyFuncFromVolumeEncryption(ctx, *volEncryption, volID) + if err != nil { + log.ErrorLog(ctx, "fscrypt: could not create key function: %v", err) + + return err + } + + protectorName := fmt.Sprintf("%s-%s", FscryptProtectorPrefix, volEncryption.GetID()) + + switch volEncryption.KMS.RequiresDEKStore() { + case kms.DEKStoreMetadata: + // Metadata style KMS use the KMS secret as a custom + // passphrase directly in fscrypt, circumenting key + // derivation on the CSI side to allow users to fall + // back on the fscrypt commandline tool easily + fscryptContext.Config.Source = fscryptmetadata.SourceType_custom_passphrase + case kms.DEKStoreIntegrated: + fscryptContext.Config.Source = fscryptmetadata.SourceType_raw_key + } + + if kernelPolicyExists && metadataDirExists { + log.DebugLog(ctx, "fscrypt: Encrypted directory already set up, policy exists") + + return unlockExisting(ctx, fscryptContext, encryptedPath, protectorName, keyFn) + } + + if !kernelPolicyExists && !metadataDirExists { + log.DebugLog(ctx, "fscrypt: Creating new protector and policy") + if volEncryption.KMS.RequiresDEKStore() == kms.DEKStoreIntegrated { + if err := volEncryption.StoreNewCryptoPassphrase(volID, encryptionPassphraseSize); err != nil { + log.ErrorLog(ctx, "fscrypt: store new crypto passphrase failed: %v", err) + + return err + } + } + + return initializeAndUnlock(ctx, fscryptContext, encryptedPath, protectorName, keyFn) + } + + return fmt.Errorf("unsupported") +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 0e1d6542ff5..19c46ae61ee 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -230,6 +230,15 @@ github.com/golang/protobuf/ptypes/wrappers # github.com/golang/snappy v0.0.4 ## explicit github.com/golang/snappy +# github.com/google/fscrypt v0.3.3 +## explicit; go 1.11 +github.com/google/fscrypt/actions 
+github.com/google/fscrypt/crypto +github.com/google/fscrypt/filesystem +github.com/google/fscrypt/keyring +github.com/google/fscrypt/metadata +github.com/google/fscrypt/security +github.com/google/fscrypt/util # github.com/google/gnostic v0.5.7-v3refs ## explicit; go 1.12 github.com/google/gnostic/compiler @@ -476,6 +485,9 @@ github.com/pierrec/lz4/internal/xxh32 # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors +# github.com/pkg/xattr v0.4.7 +## explicit; go 1.14 +github.com/pkg/xattr # github.com/pmezard/go-difflib v1.0.0 ## explicit github.com/pmezard/go-difflib/difflib @@ -594,6 +606,7 @@ go.uber.org/zap/internal/exit go.uber.org/zap/zapcore # golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd ## explicit; go 1.17 +golang.org/x/crypto/argon2 golang.org/x/crypto/blake2b golang.org/x/crypto/blowfish golang.org/x/crypto/chacha20 @@ -602,6 +615,7 @@ golang.org/x/crypto/cryptobyte/asn1 golang.org/x/crypto/curve25519 golang.org/x/crypto/curve25519/internal/field golang.org/x/crypto/ed25519 +golang.org/x/crypto/hkdf golang.org/x/crypto/internal/poly1305 golang.org/x/crypto/internal/subtle golang.org/x/crypto/pbkdf2 From ea13d57e98e1aacf786ef62ce4790cf4f39e4d0d Mon Sep 17 00:00:00 2001 From: Marcel Lauhoff Date: Thu, 23 Jun 2022 13:58:36 +0200 Subject: [PATCH 09/35] vendor: vendor fscrypt integration dependencies Signed-off-by: Marcel Lauhoff --- go.mod | 2 +- go.sum | 1 + vendor/github.com/google/fscrypt/LICENSE | 202 +++ .../google/fscrypt/actions/callback.go | 132 ++ .../google/fscrypt/actions/config.go | 293 +++++ .../google/fscrypt/actions/context.go | 184 +++ .../google/fscrypt/actions/policy.go | 622 ++++++++++ .../google/fscrypt/actions/protector.go | 300 +++++ .../google/fscrypt/actions/recovery.go | 131 ++ .../google/fscrypt/crypto/crypto.go | 228 ++++ .../github.com/google/fscrypt/crypto/key.go | 354 ++++++ .../github.com/google/fscrypt/crypto/rand.go | 98 ++ .../google/fscrypt/filesystem/filesystem.go | 1088 +++++++++++++++++ .../google/fscrypt/filesystem/mountpoint.go | 578 +++++++++ .../google/fscrypt/filesystem/path.go | 128 ++ .../google/fscrypt/keyring/fs_keyring.go | 326 +++++ .../google/fscrypt/keyring/keyring.go | 175 +++ .../google/fscrypt/keyring/user_keyring.go | 251 ++++ .../google/fscrypt/metadata/checks.go | 221 ++++ .../google/fscrypt/metadata/config.go | 59 + .../google/fscrypt/metadata/constants.go | 57 + .../google/fscrypt/metadata/metadata.pb.go | 589 +++++++++ .../google/fscrypt/metadata/metadata.proto | 107 ++ .../google/fscrypt/metadata/policy.go | 348 ++++++ .../google/fscrypt/security/cache.go | 49 + .../google/fscrypt/security/privileges.go | 156 +++ .../github.com/google/fscrypt/util/errors.go | 135 ++ vendor/github.com/google/fscrypt/util/util.go | 163 +++ vendor/github.com/pkg/xattr/.gitignore | 26 + vendor/github.com/pkg/xattr/LICENSE | 25 + vendor/github.com/pkg/xattr/README.md | 45 + vendor/github.com/pkg/xattr/xattr.go | 255 ++++ vendor/github.com/pkg/xattr/xattr_bsd.go | 201 +++ vendor/github.com/pkg/xattr/xattr_darwin.go | 90 ++ vendor/github.com/pkg/xattr/xattr_linux.go | 142 +++ vendor/github.com/pkg/xattr/xattr_solaris.go | 165 +++ .../github.com/pkg/xattr/xattr_unsupported.go | 70 ++ vendor/golang.org/x/crypto/argon2/argon2.go | 285 +++++ vendor/golang.org/x/crypto/argon2/blake2b.go | 53 + .../x/crypto/argon2/blamka_amd64.go | 61 + .../golang.org/x/crypto/argon2/blamka_amd64.s | 244 ++++ .../x/crypto/argon2/blamka_generic.go | 163 +++ .../golang.org/x/crypto/argon2/blamka_ref.go | 16 + vendor/golang.org/x/crypto/hkdf/hkdf.go 
| 93 ++ 44 files changed, 8910 insertions(+), 1 deletion(-) create mode 100644 vendor/github.com/google/fscrypt/LICENSE create mode 100644 vendor/github.com/google/fscrypt/actions/callback.go create mode 100644 vendor/github.com/google/fscrypt/actions/config.go create mode 100644 vendor/github.com/google/fscrypt/actions/context.go create mode 100644 vendor/github.com/google/fscrypt/actions/policy.go create mode 100644 vendor/github.com/google/fscrypt/actions/protector.go create mode 100644 vendor/github.com/google/fscrypt/actions/recovery.go create mode 100644 vendor/github.com/google/fscrypt/crypto/crypto.go create mode 100644 vendor/github.com/google/fscrypt/crypto/key.go create mode 100644 vendor/github.com/google/fscrypt/crypto/rand.go create mode 100644 vendor/github.com/google/fscrypt/filesystem/filesystem.go create mode 100644 vendor/github.com/google/fscrypt/filesystem/mountpoint.go create mode 100644 vendor/github.com/google/fscrypt/filesystem/path.go create mode 100644 vendor/github.com/google/fscrypt/keyring/fs_keyring.go create mode 100644 vendor/github.com/google/fscrypt/keyring/keyring.go create mode 100644 vendor/github.com/google/fscrypt/keyring/user_keyring.go create mode 100644 vendor/github.com/google/fscrypt/metadata/checks.go create mode 100644 vendor/github.com/google/fscrypt/metadata/config.go create mode 100644 vendor/github.com/google/fscrypt/metadata/constants.go create mode 100644 vendor/github.com/google/fscrypt/metadata/metadata.pb.go create mode 100644 vendor/github.com/google/fscrypt/metadata/metadata.proto create mode 100644 vendor/github.com/google/fscrypt/metadata/policy.go create mode 100644 vendor/github.com/google/fscrypt/security/cache.go create mode 100644 vendor/github.com/google/fscrypt/security/privileges.go create mode 100644 vendor/github.com/google/fscrypt/util/errors.go create mode 100644 vendor/github.com/google/fscrypt/util/util.go create mode 100644 vendor/github.com/pkg/xattr/.gitignore create mode 100644 vendor/github.com/pkg/xattr/LICENSE create mode 100644 vendor/github.com/pkg/xattr/README.md create mode 100644 vendor/github.com/pkg/xattr/xattr.go create mode 100644 vendor/github.com/pkg/xattr/xattr_bsd.go create mode 100644 vendor/github.com/pkg/xattr/xattr_darwin.go create mode 100644 vendor/github.com/pkg/xattr/xattr_linux.go create mode 100644 vendor/github.com/pkg/xattr/xattr_solaris.go create mode 100644 vendor/github.com/pkg/xattr/xattr_unsupported.go create mode 100644 vendor/golang.org/x/crypto/argon2/argon2.go create mode 100644 vendor/golang.org/x/crypto/argon2/blake2b.go create mode 100644 vendor/golang.org/x/crypto/argon2/blamka_amd64.go create mode 100644 vendor/golang.org/x/crypto/argon2/blamka_amd64.s create mode 100644 vendor/golang.org/x/crypto/argon2/blamka_generic.go create mode 100644 vendor/golang.org/x/crypto/argon2/blamka_ref.go create mode 100644 vendor/golang.org/x/crypto/hkdf/hkdf.go diff --git a/go.mod b/go.mod index badf4dde544..034d9de5caa 100644 --- a/go.mod +++ b/go.mod @@ -14,8 +14,8 @@ require ( github.com/csi-addons/spec v0.1.2-0.20220906123848-52ce69f90900 github.com/gemalto/kmip-go v0.0.8-0.20220721195433-3fe83e2d3f26 github.com/golang/protobuf v1.5.2 - github.com/google/uuid v1.3.0 github.com/google/fscrypt v0.3.3 + github.com/google/uuid v1.3.0 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/hashicorp/vault/api v1.7.2 diff --git a/go.sum b/go.sum index 34b701e0ae0..7f408274e33 100644 --- a/go.sum +++ b/go.sum @@ -1502,6 +1502,7 @@ 
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s= diff --git a/vendor/github.com/google/fscrypt/LICENSE b/vendor/github.com/google/fscrypt/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/google/fscrypt/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/google/fscrypt/actions/callback.go b/vendor/github.com/google/fscrypt/actions/callback.go new file mode 100644 index 00000000000..f15893db8ce --- /dev/null +++ b/vendor/github.com/google/fscrypt/actions/callback.go @@ -0,0 +1,132 @@ +/* + * callback.go - defines how the caller of an action function passes along a key + * to be used in this package. + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package actions + +import ( + "log" + + "github.com/pkg/errors" + + "github.com/google/fscrypt/crypto" + "github.com/google/fscrypt/filesystem" + "github.com/google/fscrypt/metadata" +) + +// ProtectorInfo is the information a caller will receive about a Protector +// before they have to return the corresponding key. This is currently a +// read-only view of metadata.ProtectorData. +type ProtectorInfo struct { + data *metadata.ProtectorData +} + +// Descriptor is the Protector's descriptor used to uniquely identify it. +func (pi *ProtectorInfo) Descriptor() string { return pi.data.GetProtectorDescriptor() } + +// Source indicates the type of the descriptor (how it should be unlocked). +func (pi *ProtectorInfo) Source() metadata.SourceType { return pi.data.GetSource() } + +// Name is used to describe custom passphrase and raw key descriptors. +func (pi *ProtectorInfo) Name() string { return pi.data.GetName() } + +// UID is used to identify the user for login passphrases. +func (pi *ProtectorInfo) UID() int64 { return pi.data.GetUid() } + +// KeyFunc is passed to a function that will require some type of key. +// The info parameter is provided so the callback knows which key to provide. +// The retry parameter indicates that a previous key provided by this callback +// was incorrect (this allows for user feedback like "incorrect passphrase"). +// +// For passphrase sources, the returned key should be a passphrase. For raw +// sources, the returned key should be a 256-bit cryptographic key. Consumers +// of the callback will wipe the returned key. An error returned by the callback +// will be propagated back to the caller. +type KeyFunc func(info ProtectorInfo, retry bool) (*crypto.Key, error) + +// getWrappingKey uses the provided callback to get the wrapping key +// corresponding to the ProtectorInfo. This runs the passphrase hash for +// passphrase sources or just relays the callback for raw sources. +func getWrappingKey(info ProtectorInfo, keyFn KeyFunc, retry bool) (*crypto.Key, error) { + // For raw key sources, we can just use the key directly. + if info.Source() == metadata.SourceType_raw_key { + return keyFn(info, retry) + } + + // Run the passphrase hash for other sources. + passphrase, err := keyFn(info, retry) + if err != nil { + return nil, err + } + defer passphrase.Wipe() + + log.Printf("running passphrase hash for protector %s", info.Descriptor()) + return crypto.PassphraseHash(passphrase, info.data.Salt, info.data.Costs) +} + +// unwrapProtectorKey uses the provided callback and ProtectorInfo to return +// the unwrapped protector key. This will repeatedly call keyFn to get the +// wrapping key until the correct key is returned by the callback or the +// callback returns an error. +func unwrapProtectorKey(info ProtectorInfo, keyFn KeyFunc) (*crypto.Key, error) { + retry := false + for { + wrappingKey, err := getWrappingKey(info, keyFn, retry) + if err != nil { + return nil, err + } + + protectorKey, err := crypto.Unwrap(wrappingKey, info.data.WrappedKey) + wrappingKey.Wipe() + + switch errors.Cause(err) { + case nil: + log.Printf("valid wrapping key for protector %s", info.Descriptor()) + return protectorKey, nil + case crypto.ErrBadAuth: + // After the first failure, we let the callback know we are retrying. 
+ log.Printf("invalid wrapping key for protector %s", info.Descriptor()) + retry = true + continue + default: + return nil, err + } + } +} + +// ProtectorOption is information about a protector relative to a Policy. +type ProtectorOption struct { + ProtectorInfo + // LinkedMount is the mountpoint for a linked protector. It is nil if + // the protector is not a linked protector (or there is a LoadError). + LinkedMount *filesystem.Mount + // LoadError is non-nil if there was an error in getting the data for + // the protector. + LoadError error +} + +// OptionFunc is passed to a function that needs to unlock a Policy. +// The callback is used to specify which protector should be used to unlock a +// Policy. The descriptor indicates which Policy we are using, while the options +// correspond to the valid Protectors protecting the Policy. +// +// The OptionFunc should either return a valid index into options, which +// corresponds to the desired protector, or an error (which will be propagated +// back to the caller). +type OptionFunc func(policyDescriptor string, options []*ProtectorOption) (int, error) diff --git a/vendor/github.com/google/fscrypt/actions/config.go b/vendor/github.com/google/fscrypt/actions/config.go new file mode 100644 index 00000000000..a8eb029dbdc --- /dev/null +++ b/vendor/github.com/google/fscrypt/actions/config.go @@ -0,0 +1,293 @@ +/* + * config.go - Actions for creating a new config file, which includes new + * hashing costs and the config file's location. + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package actions + +import ( + "bytes" + "fmt" + "log" + "os" + "runtime" + "time" + + "golang.org/x/sys/unix" + + "github.com/google/fscrypt/crypto" + "github.com/google/fscrypt/filesystem" + "github.com/google/fscrypt/metadata" + "github.com/google/fscrypt/util" +) + +// ConfigFileLocation is the location of fscrypt's global settings. This can be +// overridden by the user of this package. +var ConfigFileLocation = "/etc/fscrypt.conf" + +// ErrBadConfig is an internal error that indicates that the config struct is invalid. +type ErrBadConfig struct { + Config *metadata.Config + UnderlyingError error +} + +func (err *ErrBadConfig) Error() string { + return fmt.Sprintf(`internal error: config is invalid: %s + + The invalid config is %s`, err.UnderlyingError, err.Config) +} + +// ErrBadConfigFile indicates that the config file is invalid. +type ErrBadConfigFile struct { + Path string + UnderlyingError error +} + +func (err *ErrBadConfigFile) Error() string { + return fmt.Sprintf("%q is invalid: %s", err.Path, err.UnderlyingError) +} + +// ErrConfigFileExists indicates that the config file already exists. +type ErrConfigFileExists struct { + Path string +} + +func (err *ErrConfigFileExists) Error() string { + return fmt.Sprintf("%q already exists", err.Path) +} + +// ErrNoConfigFile indicates that the config file doesn't exist. 
+type ErrNoConfigFile struct { + Path string +} + +func (err *ErrNoConfigFile) Error() string { + return fmt.Sprintf("%q doesn't exist", err.Path) +} + +const ( + // Permissions of the config file (global readable) + configPermissions = 0644 + // Config file should be created for writing and not already exist + createFlags = os.O_CREATE | os.O_WRONLY | os.O_EXCL + // 128 MiB is a large enough amount of memory to make the password hash + // very difficult to brute force on specialized hardware, but small + // enough to work on most GNU/Linux systems. + maxMemoryBytes = 128 * 1024 * 1024 +) + +var ( + timingPassphrase = []byte("I am a fake passphrase") + timingSalt = bytes.Repeat([]byte{42}, metadata.SaltLen) +) + +// CreateConfigFile creates a new config file at the appropriate location with +// the appropriate hashing costs and encryption parameters. The hashing will be +// configured to take as long as the specified time target. In addition, the +// version of encryption policy to use may be overridden from the default of v1. +func CreateConfigFile(target time.Duration, policyVersion int64) error { + // Create the config file before computing the hashing costs, so we fail + // immediately if the program has insufficient permissions. + configFile, err := filesystem.OpenFileOverridingUmask(ConfigFileLocation, + createFlags, configPermissions) + switch { + case os.IsExist(err): + return &ErrConfigFileExists{ConfigFileLocation} + case err != nil: + return err + } + defer configFile.Close() + + config := &metadata.Config{ + Source: metadata.DefaultSource, + Options: metadata.DefaultOptions, + } + + if policyVersion != 0 { + config.Options.PolicyVersion = policyVersion + } + + if config.HashCosts, err = getHashingCosts(target); err != nil { + return err + } + + log.Printf("Creating config at %q with %v\n", ConfigFileLocation, config) + return metadata.WriteConfig(config, configFile) +} + +// getConfig returns the current configuration struct. Any fields not specified +// in the config file use the system defaults. An error is returned if the +// config file hasn't been setup with CreateConfigFile yet or the config +// contains invalid data. 
+func getConfig() (*metadata.Config, error) { + configFile, err := os.Open(ConfigFileLocation) + switch { + case os.IsNotExist(err): + return nil, &ErrNoConfigFile{ConfigFileLocation} + case err != nil: + return nil, err + } + defer configFile.Close() + + log.Printf("Reading config from %q\n", ConfigFileLocation) + config, err := metadata.ReadConfig(configFile) + if err != nil { + return nil, &ErrBadConfigFile{ConfigFileLocation, err} + } + + // Use system defaults if not specified + if config.Source == metadata.SourceType_default { + config.Source = metadata.DefaultSource + log.Printf("Falling back to source of %q", config.Source.String()) + } + if config.Options.Padding == 0 { + config.Options.Padding = metadata.DefaultOptions.Padding + log.Printf("Falling back to padding of %d", config.Options.Padding) + } + if config.Options.Contents == metadata.EncryptionOptions_default { + config.Options.Contents = metadata.DefaultOptions.Contents + log.Printf("Falling back to contents mode of %q", config.Options.Contents) + } + if config.Options.Filenames == metadata.EncryptionOptions_default { + config.Options.Filenames = metadata.DefaultOptions.Filenames + log.Printf("Falling back to filenames mode of %q", config.Options.Filenames) + } + if config.Options.PolicyVersion == 0 { + config.Options.PolicyVersion = metadata.DefaultOptions.PolicyVersion + log.Printf("Falling back to policy version of %d", config.Options.PolicyVersion) + } + + if err := config.CheckValidity(); err != nil { + return nil, &ErrBadConfigFile{ConfigFileLocation, err} + } + + return config, nil +} + +// getHashingCosts returns hashing costs so that hashing a password will take +// approximately the target time. This is done using the total amount of RAM, +// the number of CPUs present, and by running the passphrase hash many times. +func getHashingCosts(target time.Duration) (*metadata.HashingCosts, error) { + log.Printf("Finding hashing costs that take %v\n", target) + + // Start out with the minimal possible costs that use all the CPUs. + nCPUs := int64(runtime.NumCPU()) + costs := &metadata.HashingCosts{ + Time: 1, + Memory: 8 * nCPUs, + Parallelism: nCPUs, + } + + // If even the minimal costs are not fast enough, just return the + // minimal costs and log a warning. + t, err := timeHashingCosts(costs) + if err != nil { + return nil, err + } + log.Printf("Min Costs={%v}\t-> %v\n", costs, t) + + if t > target { + log.Printf("time exceeded the target of %v.\n", target) + return costs, nil + } + + // Now we start doubling the costs until we reach the target. + memoryKiBLimit := memoryBytesLimit() / 1024 + for { + // Store a copy of the previous costs + costsPrev := *costs + tPrev := t + + // Double the memory up to the max, then double the time. + if costs.Memory < memoryKiBLimit { + costs.Memory = util.MinInt64(2*costs.Memory, memoryKiBLimit) + } else { + costs.Time *= 2 + } + + // If our hashing failed, return the last good set of costs. + if t, err = timeHashingCosts(costs); err != nil { + log.Printf("Hashing with costs={%v} failed: %v\n", costs, err) + return &costsPrev, nil + } + log.Printf("Costs={%v}\t-> %v\n", costs, t) + + // If we have reached the target time, we return a set of costs + // based on the linear interpolation between the last two times. 
+ if t >= target { + f := float64(target-tPrev) / float64(t-tPrev) + return &metadata.HashingCosts{ + Time: betweenCosts(costsPrev.Time, costs.Time, f), + Memory: betweenCosts(costsPrev.Memory, costs.Memory, f), + Parallelism: costs.Parallelism, + }, nil + } + } +} + +// memoryBytesLimit returns the maximum amount of memory we will use for +// passphrase hashing. This will never be more than a reasonable maximum (for +// compatibility) or an 8th the available system RAM. +func memoryBytesLimit() int64 { + // The sysinfo syscall only fails if given a bad address + var info unix.Sysinfo_t + err := unix.Sysinfo(&info) + util.NeverError(err) + + totalRAMBytes := int64(info.Totalram) + return util.MinInt64(totalRAMBytes/8, maxMemoryBytes) +} + +// betweenCosts returns a cost between a and b. Specifically, it returns the +// floor of a + f*(b-a). This way, f=0 returns a and f=1 returns b. +func betweenCosts(a, b int64, f float64) int64 { + return a + int64(f*float64(b-a)) +} + +// timeHashingCosts runs the passphrase hash with the specified costs and +// returns the time it takes to hash the passphrase. +func timeHashingCosts(costs *metadata.HashingCosts) (time.Duration, error) { + passphrase, err := crypto.NewKeyFromReader(bytes.NewReader(timingPassphrase)) + if err != nil { + return 0, err + } + defer passphrase.Wipe() + + // Be sure to measure CPU time, not wall time (time.Now) + begin := cpuTimeInNanoseconds() + hash, err := crypto.PassphraseHash(passphrase, timingSalt, costs) + if err == nil { + hash.Wipe() + } + end := cpuTimeInNanoseconds() + + // This uses a lot of memory, run the garbage collector + runtime.GC() + + return time.Duration((end - begin) / costs.Parallelism), nil +} + +// cpuTimeInNanoseconds returns the nanosecond count based on the process's CPU usage. +// This number has no absolute meaning, only relative meaning to other calls. +func cpuTimeInNanoseconds() int64 { + var ts unix.Timespec + err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts) + // ClockGettime fails if given a bad address or on a VERY old system. + util.NeverError(err) + return unix.TimespecToNsec(ts) +} diff --git a/vendor/github.com/google/fscrypt/actions/context.go b/vendor/github.com/google/fscrypt/actions/context.go new file mode 100644 index 00000000000..ac3f6d30455 --- /dev/null +++ b/vendor/github.com/google/fscrypt/actions/context.go @@ -0,0 +1,184 @@ +/* + * context.go - top-level interface to fscrypt packages + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +// Package actions is the high-level interface to the fscrypt packages. The +// functions here roughly correspond with commands for the tool in cmd/fscrypt. +// All of the actions include a significant amount of logging, so that good +// output can be provided for cmd/fscrypt's verbose mode. 
+// The top-level actions currently include: +// - Creating a new config file +// - Creating a context on which to perform actions +// - Creating, unlocking, and modifying Protectors +// - Creating, unlocking, and modifying Policies +package actions + +import ( + "log" + "os/user" + + "github.com/pkg/errors" + + "github.com/google/fscrypt/filesystem" + "github.com/google/fscrypt/keyring" + "github.com/google/fscrypt/metadata" + "github.com/google/fscrypt/util" +) + +// ErrLocked indicates that the key hasn't been unwrapped yet. +var ErrLocked = errors.New("key needs to be unlocked first") + +// Context contains the necessary global state to perform most of fscrypt's +// actions. +type Context struct { + // Config is the struct loaded from the global config file. It can be + // modified after being loaded to customise parameters. + Config *metadata.Config + // Mount is the filesystem relative to which all Protectors and Policies + // are added, edited, removed, and applied, and to which policies using + // the filesystem keyring are provisioned. + Mount *filesystem.Mount + // TargetUser is the user for whom protectors are created, and to whose + // keyring policies using the user keyring are provisioned. It's also + // the user for whom the keys are claimed in the filesystem keyring when + // v2 policies are provisioned. + TargetUser *user.User + // TrustedUser is the user for whom policies and protectors are allowed + // to be read. Specifically, if TrustedUser is set, then only + // policies and protectors owned by TrustedUser or by root will be + // allowed to be read. If it's nil, then all policies and protectors + // the process has filesystem-level read access to will be allowed. + TrustedUser *user.User +} + +// NewContextFromPath makes a context for the filesystem containing the +// specified path and whose Config is loaded from the global config file. On +// success, the Context contains a valid Config and Mount. The target user +// defaults to the current effective user if none is specified. +func NewContextFromPath(path string, targetUser *user.User) (*Context, error) { + ctx, err := newContextFromUser(targetUser) + if err != nil { + return nil, err + } + if ctx.Mount, err = filesystem.FindMount(path); err != nil { + return nil, err + } + + log.Printf("%s is on %s filesystem %q (%s)", path, + ctx.Mount.FilesystemType, ctx.Mount.Path, ctx.Mount.Device) + return ctx, nil +} + +// NewContextFromMountpoint makes a context for the filesystem at the specified +// mountpoint and whose Config is loaded from the global config file. On +// success, the Context contains a valid Config and Mount. The target user +// defaults to the current effective user if none is specified. +func NewContextFromMountpoint(mountpoint string, targetUser *user.User) (*Context, error) { + ctx, err := newContextFromUser(targetUser) + if err != nil { + return nil, err + } + if ctx.Mount, err = filesystem.GetMount(mountpoint); err != nil { + return nil, err + } + + log.Printf("found %s filesystem %q (%s)", ctx.Mount.FilesystemType, + ctx.Mount.Path, ctx.Mount.Device) + return ctx, nil +} + +// newContextFromUser makes a context with the corresponding target user, and +// whose Config is loaded from the global config file. If the target user is +// nil, the effective user is used. 
+func newContextFromUser(targetUser *user.User) (*Context, error) { + var err error + if targetUser == nil { + if targetUser, err = util.EffectiveUser(); err != nil { + return nil, err + } + } + + ctx := &Context{TargetUser: targetUser} + if ctx.Config, err = getConfig(); err != nil { + return nil, err + } + + // By default, when running as a non-root user we only read policies and + // protectors owned by the user or root. When running as root, we allow + // reading all policies and protectors. + if !ctx.Config.GetAllowCrossUserMetadata() && !util.IsUserRoot() { + ctx.TrustedUser, err = util.EffectiveUser() + if err != nil { + return nil, err + } + } + + log.Printf("creating context for user %q", targetUser.Username) + return ctx, nil +} + +// checkContext verifies that the context contains a valid config and a mount +// which is being used with fscrypt. +func (ctx *Context) checkContext() error { + if err := ctx.Config.CheckValidity(); err != nil { + return &ErrBadConfig{ctx.Config, err} + } + return ctx.Mount.CheckSetup(ctx.TrustedUser) +} + +func (ctx *Context) getKeyringOptions() *keyring.Options { + return &keyring.Options{ + Mount: ctx.Mount, + User: ctx.TargetUser, + UseFsKeyringForV1Policies: ctx.Config.GetUseFsKeyringForV1Policies(), + } +} + +// getProtectorOption returns the ProtectorOption for the protector on the +// context's mountpoint with the specified descriptor. +func (ctx *Context) getProtectorOption(protectorDescriptor string) *ProtectorOption { + mnt, data, err := ctx.Mount.GetProtector(protectorDescriptor, ctx.TrustedUser) + if err != nil { + return &ProtectorOption{ProtectorInfo{}, nil, err} + } + + info := ProtectorInfo{data} + // No linked path if on the same mountpoint + if mnt == ctx.Mount { + return &ProtectorOption{info, nil, nil} + } + return &ProtectorOption{info, mnt, nil} +} + +// ProtectorOptions creates a slice of all the options for all of the Protectors +// on the Context's mountpoint. +func (ctx *Context) ProtectorOptions() ([]*ProtectorOption, error) { + if err := ctx.checkContext(); err != nil { + return nil, err + } + descriptors, err := ctx.Mount.ListProtectors(ctx.TrustedUser) + if err != nil { + return nil, err + } + + options := make([]*ProtectorOption, len(descriptors)) + for i, descriptor := range descriptors { + options[i] = ctx.getProtectorOption(descriptor) + } + return options, nil +} diff --git a/vendor/github.com/google/fscrypt/actions/policy.go b/vendor/github.com/google/fscrypt/actions/policy.go new file mode 100644 index 00000000000..3b201769320 --- /dev/null +++ b/vendor/github.com/google/fscrypt/actions/policy.go @@ -0,0 +1,622 @@ +/* + * policy.go - functions for dealing with policies + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package actions + +import ( + "fmt" + "log" + "os" + "os/user" + + "github.com/golang/protobuf/proto" + "github.com/pkg/errors" + + "github.com/google/fscrypt/crypto" + "github.com/google/fscrypt/filesystem" + "github.com/google/fscrypt/keyring" + "github.com/google/fscrypt/metadata" + "github.com/google/fscrypt/util" +) + +// ErrAccessDeniedPossiblyV2 indicates that a directory's encryption policy +// couldn't be retrieved due to "permission denied", but it looks like it's due +// to the directory using a v2 policy but the kernel not supporting it. +type ErrAccessDeniedPossiblyV2 struct { + DirPath string +} + +func (err *ErrAccessDeniedPossiblyV2) Error() string { + return fmt.Sprintf(` + failed to get encryption policy of %s: permission denied + + This may be caused by the directory using a v2 encryption policy and the + current kernel not supporting it. If indeed the case, then this + directory can only be used on kernel v5.4 and later. You can create + directories accessible on older kernels by changing policy_version to 1 + in %s.`, + err.DirPath, ConfigFileLocation) +} + +// ErrAlreadyProtected indicates that a policy is already protected by the given +// protector. +type ErrAlreadyProtected struct { + Policy *Policy + Protector *Protector +} + +func (err *ErrAlreadyProtected) Error() string { + return fmt.Sprintf("policy %s is already protected by protector %s", + err.Policy.Descriptor(), err.Protector.Descriptor()) +} + +// ErrDifferentFilesystem indicates that a policy can't be applied to a +// directory on a different filesystem. +type ErrDifferentFilesystem struct { + PolicyMount *filesystem.Mount + PathMount *filesystem.Mount +} + +func (err *ErrDifferentFilesystem) Error() string { + return fmt.Sprintf(`cannot apply policy from filesystem %q to a + directory on filesystem %q. Policies may only protect files on the same + filesystem.`, err.PolicyMount.Path, err.PathMount.Path) +} + +// ErrMissingPolicyMetadata indicates that a directory is encrypted but its +// policy metadata cannot be found. +type ErrMissingPolicyMetadata struct { + Mount *filesystem.Mount + DirPath string + Descriptor string +} + +func (err *ErrMissingPolicyMetadata) Error() string { + return fmt.Sprintf(`filesystem %q does not contain the policy metadata + for %q. This directory has either been encrypted with another tool (such + as e4crypt), or the file %q has been deleted.`, + err.Mount.Path, err.DirPath, + err.Mount.PolicyPath(err.Descriptor)) +} + +// ErrNotProtected indicates that the given policy is not protected by the given +// protector. +type ErrNotProtected struct { + PolicyDescriptor string + ProtectorDescriptor string +} + +func (err *ErrNotProtected) Error() string { + return fmt.Sprintf(`policy %s is not protected by protector %s`, + err.PolicyDescriptor, err.ProtectorDescriptor) +} + +// ErrOnlyProtector indicates that the last protector can't be removed from a +// policy. +type ErrOnlyProtector struct { + Policy *Policy +} + +func (err *ErrOnlyProtector) Error() string { + return fmt.Sprintf(`cannot remove the only protector from policy %s. A + policy must have at least one protector.`, err.Policy.Descriptor()) +} + +// ErrPolicyMetadataMismatch indicates that the policy metadata for an encrypted +// directory is inconsistent with that directory. 
+type ErrPolicyMetadataMismatch struct { + DirPath string + Mount *filesystem.Mount + PathData *metadata.PolicyData + MountData *metadata.PolicyData +} + +func (err *ErrPolicyMetadataMismatch) Error() string { + return fmt.Sprintf(`inconsistent metadata between encrypted directory %q + and its corresponding metadata file %q. + + Directory has descriptor:%s %s + + Metadata file has descriptor:%s %s`, + err.DirPath, err.Mount.PolicyPath(err.PathData.KeyDescriptor), + err.PathData.KeyDescriptor, err.PathData.Options, + err.MountData.KeyDescriptor, err.MountData.Options) +} + +// PurgeAllPolicies removes all policy keys on the filesystem from the kernel +// keyring. In order for this to fully take effect, the filesystem may also need +// to be unmounted or caches dropped. +func PurgeAllPolicies(ctx *Context) error { + if err := ctx.checkContext(); err != nil { + return err + } + policies, err := ctx.Mount.ListPolicies(nil) + if err != nil { + return err + } + + for _, policyDescriptor := range policies { + err = keyring.RemoveEncryptionKey(policyDescriptor, ctx.getKeyringOptions(), false) + switch errors.Cause(err) { + case nil, keyring.ErrKeyNotPresent: + // We don't care if the key has already been removed + case keyring.ErrKeyFilesOpen: + log.Printf("Key for policy %s couldn't be fully removed because some files are still in-use", + policyDescriptor) + case keyring.ErrKeyAddedByOtherUsers: + log.Printf("Key for policy %s couldn't be fully removed because other user(s) have added it too", + policyDescriptor) + default: + return err + } + } + return nil +} + +// Policy represents an unlocked policy, so it contains the PolicyData as well +// as the actual protector key. These unlocked Polices can then be applied to a +// directory, or have their key material inserted into the keyring (which will +// allow encrypted files to be accessed). As with the key struct, a Policy +// should be wiped after use. +type Policy struct { + Context *Context + data *metadata.PolicyData + key *crypto.Key + created bool + ownerIfCreating *user.User + newLinkedProtectors []string +} + +// CreatePolicy creates a Policy protected by given Protector and stores the +// appropriate data on the filesystem. On error, no data is changed on the +// filesystem. +func CreatePolicy(ctx *Context, protector *Protector) (*Policy, error) { + if err := ctx.checkContext(); err != nil { + return nil, err + } + // Randomly create the underlying policy key (and wipe if we fail) + key, err := crypto.NewRandomKey(metadata.PolicyKeyLen) + if err != nil { + return nil, err + } + + keyDescriptor, err := crypto.ComputeKeyDescriptor(key, ctx.Config.Options.PolicyVersion) + if err != nil { + key.Wipe() + return nil, err + } + + policy := &Policy{ + Context: ctx, + data: &metadata.PolicyData{ + Options: ctx.Config.Options, + KeyDescriptor: keyDescriptor, + }, + key: key, + created: true, + } + + policy.ownerIfCreating, err = getOwnerOfMetadataForProtector(protector) + if err != nil { + policy.Lock() + return nil, err + } + + if err = policy.AddProtector(protector); err != nil { + policy.Lock() + return nil, err + } + + return policy, nil +} + +// GetPolicy retrieves a locked policy with a specific descriptor. The Policy is +// still locked in this case, so it must be unlocked before using certain +// methods. 
+func GetPolicy(ctx *Context, descriptor string) (*Policy, error) { + if err := ctx.checkContext(); err != nil { + return nil, err + } + data, err := ctx.Mount.GetPolicy(descriptor, ctx.TrustedUser) + if err != nil { + return nil, err + } + log.Printf("got data for %s from %q", descriptor, ctx.Mount.Path) + + return &Policy{Context: ctx, data: data}, nil +} + +// GetPolicyFromPath returns the locked policy descriptor for a file on the +// filesystem. The Policy is still locked in this case, so it must be unlocked +// before using certain methods. An error is returned if the metadata is +// inconsistent or the path is not encrypted. +func GetPolicyFromPath(ctx *Context, path string) (*Policy, error) { + if err := ctx.checkContext(); err != nil { + return nil, err + } + + // We double check that the options agree for both the data we get from + // the path, and the data we get from the mountpoint. + pathData, err := metadata.GetPolicy(path) + err = ctx.Mount.EncryptionSupportError(err) + if err != nil { + // On kernels that don't support v2 encryption policies, trying + // to open a directory with a v2 policy simply gave EACCES. This + // is ambiguous with other errors, but try to detect this case + // and show a better error message. + if os.IsPermission(err) && + filesystem.HaveReadAccessTo(path) && + !keyring.IsFsKeyringSupported(ctx.Mount) { + return nil, &ErrAccessDeniedPossiblyV2{path} + } + return nil, err + } + descriptor := pathData.KeyDescriptor + log.Printf("found policy %s for %q", descriptor, path) + + mountData, err := ctx.Mount.GetPolicy(descriptor, ctx.TrustedUser) + if err != nil { + log.Printf("getting policy metadata: %v", err) + if _, ok := err.(*filesystem.ErrPolicyNotFound); ok { + return nil, &ErrMissingPolicyMetadata{ctx.Mount, path, descriptor} + } + return nil, err + } + log.Printf("found data for policy %s on %q", descriptor, ctx.Mount.Path) + + if !proto.Equal(pathData.Options, mountData.Options) || + pathData.KeyDescriptor != mountData.KeyDescriptor { + return nil, &ErrPolicyMetadataMismatch{path, ctx.Mount, pathData, mountData} + } + log.Print("data from filesystem and path agree") + + return &Policy{Context: ctx, data: mountData}, nil +} + +// ProtectorOptions creates a slice of ProtectorOptions for the protectors +// protecting this policy. +func (policy *Policy) ProtectorOptions() []*ProtectorOption { + options := make([]*ProtectorOption, len(policy.data.WrappedPolicyKeys)) + for i, wrappedPolicyKey := range policy.data.WrappedPolicyKeys { + options[i] = policy.Context.getProtectorOption(wrappedPolicyKey.ProtectorDescriptor) + } + return options +} + +// ProtectorDescriptors creates a slice of the Protector descriptors for the +// protectors protecting this policy. +func (policy *Policy) ProtectorDescriptors() []string { + descriptors := make([]string, len(policy.data.WrappedPolicyKeys)) + for i, wrappedPolicyKey := range policy.data.WrappedPolicyKeys { + descriptors[i] = wrappedPolicyKey.ProtectorDescriptor + } + return descriptors +} + +// Descriptor returns the key descriptor for this policy. +func (policy *Policy) Descriptor() string { + return policy.data.KeyDescriptor +} + +// Options returns the encryption options of this policy. +func (policy *Policy) Options() *metadata.EncryptionOptions { + return policy.data.Options +} + +// Version returns the version of this policy. +func (policy *Policy) Version() int64 { + return policy.data.Options.PolicyVersion +} + +// Destroy removes a policy from the filesystem. 
It also removes any new +// protector links that were created for the policy. This does *not* wipe the +// policy's internal key from memory; use Lock() to do that. +func (policy *Policy) Destroy() error { + for _, protectorDescriptor := range policy.newLinkedProtectors { + policy.Context.Mount.RemoveProtector(protectorDescriptor) + } + return policy.Context.Mount.RemovePolicy(policy.Descriptor()) +} + +// Revert destroys a policy if it was created, but does nothing if it was just +// queried from the filesystem. +func (policy *Policy) Revert() error { + if !policy.created { + return nil + } + return policy.Destroy() +} + +func (policy *Policy) String() string { + return fmt.Sprintf("Policy: %s\nMountpoint: %s\nOptions: %v\nProtectors:%+v", + policy.Descriptor(), policy.Context.Mount, policy.data.Options, + policy.ProtectorDescriptors()) +} + +// Unlock unwraps the Policy's internal key. As a Protector is needed to unlock +// the Policy, callbacks to select the Policy and get the key are needed. This +// method will retry the keyFn as necessary to get the correct key for the +// selected protector. Does nothing if policy is already unlocked. +func (policy *Policy) Unlock(optionFn OptionFunc, keyFn KeyFunc) error { + if policy.key != nil { + return nil + } + options := policy.ProtectorOptions() + + // The OptionFunc indicates which option and wrapped key we should use. + idx, err := optionFn(policy.Descriptor(), options) + if err != nil { + return err + } + option := options[idx] + if option.LoadError != nil { + return option.LoadError + } + + log.Printf("protector %s selected in callback", option.Descriptor()) + protectorKey, err := unwrapProtectorKey(option.ProtectorInfo, keyFn) + if err != nil { + return err + } + defer protectorKey.Wipe() + + log.Printf("unwrapping policy %s with protector", policy.Descriptor()) + wrappedPolicyKey := policy.data.WrappedPolicyKeys[idx].WrappedKey + policy.key, err = crypto.Unwrap(protectorKey, wrappedPolicyKey) + return err +} + +// UnlockWithProtector uses an unlocked Protector to unlock a policy. An error +// is returned if the Protector is not yet unlocked or does not protect the +// policy. Does nothing if policy is already unlocked. +func (policy *Policy) UnlockWithProtector(protector *Protector) error { + if policy.key != nil { + return nil + } + if protector.key == nil { + return ErrLocked + } + idx, ok := policy.findWrappedKeyIndex(protector.Descriptor()) + if !ok { + return &ErrNotProtected{policy.Descriptor(), protector.Descriptor()} + } + + var err error + wrappedPolicyKey := policy.data.WrappedPolicyKeys[idx].WrappedKey + policy.key, err = crypto.Unwrap(protector.key, wrappedPolicyKey) + return err +} + +// Lock wipes a Policy's internal Key. It should always be called after using a +// Policy. This is often done with a defer statement. There is no effect if +// called multiple times. +func (policy *Policy) Lock() error { + err := policy.key.Wipe() + policy.key = nil + return err +} + +// UsesProtector returns if the policy is protected with the protector +func (policy *Policy) UsesProtector(protector *Protector) bool { + _, ok := policy.findWrappedKeyIndex(protector.Descriptor()) + return ok +} + +// getOwnerOfMetadataForProtector returns the User to whom the owner of any new +// policies or protector links for the given protector should be set. +// +// This will return a non-nil value only when the protector is a login protector +// and the process is running as root. 
In this scenario, root is setting up +// encryption on the user's behalf, so we need to make new policies and +// protector links owned by the user (rather than root) to allow them to be read +// by the user, just like the login protector itself which is handled elsewhere. +func getOwnerOfMetadataForProtector(protector *Protector) (*user.User, error) { + if protector.data.Source == metadata.SourceType_pam_passphrase && util.IsUserRoot() { + owner, err := util.UserFromUID(protector.data.Uid) + if err != nil { + return nil, err + } + return owner, nil + } + return nil, nil +} + +// AddProtector updates the data that is wrapping the Policy Key so that the +// provided Protector is now protecting the specified Policy. If an error is +// returned, no data has been changed. If the policy and protector are on +// different filesystems, a link will be created between them. The policy and +// protector must both be unlocked. +func (policy *Policy) AddProtector(protector *Protector) error { + if policy.UsesProtector(protector) { + return &ErrAlreadyProtected{policy, protector} + } + if policy.key == nil || protector.key == nil { + return ErrLocked + } + + // If the protector is on a different filesystem, we need to add a link + // to it on the policy's filesystem. + if policy.Context.Mount != protector.Context.Mount { + log.Printf("policy on %s\n protector on %s\n", policy.Context.Mount, protector.Context.Mount) + ownerIfCreating, err := getOwnerOfMetadataForProtector(protector) + if err != nil { + return err + } + isNewLink, err := policy.Context.Mount.AddLinkedProtector( + protector.Descriptor(), protector.Context.Mount, + protector.Context.TrustedUser, ownerIfCreating) + if err != nil { + return err + } + if isNewLink { + policy.newLinkedProtectors = append(policy.newLinkedProtectors, + protector.Descriptor()) + } + } else { + log.Printf("policy and protector both on %q", policy.Context.Mount) + } + + // Create the wrapped policy key + wrappedKey, err := crypto.Wrap(protector.key, policy.key) + if err != nil { + return err + } + + // Append the wrapped key to the data + policy.addKey(&metadata.WrappedPolicyKey{ + ProtectorDescriptor: protector.Descriptor(), + WrappedKey: wrappedKey, + }) + + if err := policy.commitData(); err != nil { + // revert the addition on failure + policy.removeKey(len(policy.data.WrappedPolicyKeys) - 1) + return err + } + return nil +} + +// RemoveProtector updates the data that is wrapping the Policy Key so that the +// protector with the given descriptor is no longer protecting the specified +// Policy. If an error is returned, no data has been changed. Note that the +// protector itself won't be removed, nor will a link to the protector be +// removed (in the case where the protector and policy are on different +// filesystems). The policy can be locked or unlocked. +func (policy *Policy) RemoveProtector(protectorDescriptor string) error { + idx, ok := policy.findWrappedKeyIndex(protectorDescriptor) + if !ok { + return &ErrNotProtected{policy.Descriptor(), protectorDescriptor} + } + + if len(policy.data.WrappedPolicyKeys) == 1 { + return &ErrOnlyProtector{policy} + } + + // Remove the wrapped key from the data + toRemove := policy.removeKey(idx) + + if err := policy.commitData(); err != nil { + // revert the removal on failure (order is irrelevant) + policy.addKey(toRemove) + return err + } + return nil +} + +// Apply sets the Policy on a specified directory. 
Currently we impose the +// additional constraint that policies and the directories they are applied to +// must reside on the same filesystem. +func (policy *Policy) Apply(path string) error { + if pathMount, err := filesystem.FindMount(path); err != nil { + return err + } else if pathMount != policy.Context.Mount { + return &ErrDifferentFilesystem{policy.Context.Mount, pathMount} + } + + err := metadata.SetPolicy(path, policy.data) + return policy.Context.Mount.EncryptionSupportError(err) +} + +// GetProvisioningStatus returns the status of this policy's key in the keyring. +func (policy *Policy) GetProvisioningStatus() keyring.KeyStatus { + status, _ := keyring.GetEncryptionKeyStatus(policy.Descriptor(), + policy.Context.getKeyringOptions()) + return status +} + +// IsProvisionedByTargetUser returns true if the policy's key is present in the +// target kernel keyring, but not if that keyring is a filesystem keyring and +// the key only been added by users other than Context.TargetUser. +func (policy *Policy) IsProvisionedByTargetUser() bool { + return policy.GetProvisioningStatus() == keyring.KeyPresent +} + +// Provision inserts the Policy key into the kernel keyring. This allows reading +// and writing of files encrypted with this directory. Requires unlocked Policy. +func (policy *Policy) Provision() error { + if policy.key == nil { + return ErrLocked + } + return keyring.AddEncryptionKey(policy.key, policy.Descriptor(), + policy.Context.getKeyringOptions()) +} + +// Deprovision removes the Policy key from the kernel keyring. This prevents +// reading and writing to the directory --- unless the target keyring is a user +// keyring, in which case caches must be dropped too. If the Policy key was +// already removed, returns keyring.ErrKeyNotPresent. +func (policy *Policy) Deprovision(allUsers bool) error { + return keyring.RemoveEncryptionKey(policy.Descriptor(), + policy.Context.getKeyringOptions(), allUsers) +} + +// NeedsUserKeyring returns true if Provision and Deprovision for this policy +// will use a user keyring (deprecated), not a filesystem keyring. +func (policy *Policy) NeedsUserKeyring() bool { + return policy.Version() == 1 && !policy.Context.Config.GetUseFsKeyringForV1Policies() +} + +// NeedsRootToProvision returns true if Provision and Deprovision will require +// root for this policy in the current configuration. +func (policy *Policy) NeedsRootToProvision() bool { + return policy.Version() == 1 && policy.Context.Config.GetUseFsKeyringForV1Policies() +} + +// CanBeAppliedWithoutProvisioning returns true if this process can apply this +// policy to a directory without first calling Provision. +func (policy *Policy) CanBeAppliedWithoutProvisioning() bool { + return policy.Version() == 1 || util.IsUserRoot() +} + +// commitData writes the Policy's current data to the filesystem. +func (policy *Policy) commitData() error { + return policy.Context.Mount.AddPolicy(policy.data, policy.ownerIfCreating) +} + +// findWrappedPolicyKey returns the index of the wrapped policy key +// corresponding to this policy and protector. The returned bool is false if no +// wrapped policy key corresponds to the specified protector, true otherwise. +func (policy *Policy) findWrappedKeyIndex(protectorDescriptor string) (int, bool) { + for idx, wrappedPolicyKey := range policy.data.WrappedPolicyKeys { + if wrappedPolicyKey.ProtectorDescriptor == protectorDescriptor { + return idx, true + } + } + return 0, false +} + +// addKey adds the wrapped policy key to end of the wrapped key data. 
+func (policy *Policy) addKey(toAdd *metadata.WrappedPolicyKey) { + policy.data.WrappedPolicyKeys = append(policy.data.WrappedPolicyKeys, toAdd) +} + +// removeKey removes the wrapped policy key at the specified index. This +// does not preserve the order of the wrapped policy key array. If no index is +// specified the last key is removed. +func (policy *Policy) removeKey(index int) *metadata.WrappedPolicyKey { + lastIdx := len(policy.data.WrappedPolicyKeys) - 1 + toRemove := policy.data.WrappedPolicyKeys[index] + + // See https://github.com/golang/go/wiki/SliceTricks + policy.data.WrappedPolicyKeys[index] = policy.data.WrappedPolicyKeys[lastIdx] + policy.data.WrappedPolicyKeys[lastIdx] = nil + policy.data.WrappedPolicyKeys = policy.data.WrappedPolicyKeys[:lastIdx] + + return toRemove +} diff --git a/vendor/github.com/google/fscrypt/actions/protector.go b/vendor/github.com/google/fscrypt/actions/protector.go new file mode 100644 index 00000000000..b986eb020b7 --- /dev/null +++ b/vendor/github.com/google/fscrypt/actions/protector.go @@ -0,0 +1,300 @@ +/* + * protector.go - functions for dealing with protectors + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package actions + +import ( + "fmt" + "log" + "os/user" + + "github.com/google/fscrypt/crypto" + "github.com/google/fscrypt/metadata" + "github.com/google/fscrypt/util" +) + +// LoginProtectorMountpoint is the mountpoint where login protectors are stored. +// This can be overridden by the user of this package. +var LoginProtectorMountpoint = "/" + +// ErrLoginProtectorExists indicates that a user already has a login protector. +type ErrLoginProtectorExists struct { + User *user.User +} + +func (err *ErrLoginProtectorExists) Error() string { + return fmt.Sprintf("user %q already has a login protector", err.User.Username) +} + +// ErrLoginProtectorName indicates that a name was given for a login protector. +type ErrLoginProtectorName struct { + Name string + User *user.User +} + +func (err *ErrLoginProtectorName) Error() string { + return fmt.Sprintf(`cannot assign name %q to new login protector for + user %q because login protectors are identified by user, not by name.`, + err.Name, err.User.Username) +} + +// ErrMissingProtectorName indicates that a protector name is needed. +type ErrMissingProtectorName struct { + Source metadata.SourceType +} + +func (err *ErrMissingProtectorName) Error() string { + return fmt.Sprintf("%s protectors must be named", err.Source) +} + +// ErrProtectorNameExists indicates that a protector name already exists. +type ErrProtectorNameExists struct { + Name string +} + +func (err *ErrProtectorNameExists) Error() string { + return fmt.Sprintf("there is already a protector named %q", err.Name) +} + +// checkForProtectorWithName returns an error if there is already a protector +// on the filesystem with a specific name (or if we cannot read the necessary +// data). 
+func checkForProtectorWithName(ctx *Context, name string) error {
+	options, err := ctx.ProtectorOptions()
+	if err != nil {
+		return err
+	}
+	for _, option := range options {
+		if option.Name() == name {
+			return &ErrProtectorNameExists{name}
+		}
+	}
+	return nil
+}
+
+// checkIfUserHasLoginProtector returns an error if there is already a login
+// protector on the filesystem for a specific user (or if we cannot read the
+// necessary data).
+func checkIfUserHasLoginProtector(ctx *Context, uid int64) error {
+	options, err := ctx.ProtectorOptions()
+	if err != nil {
+		return err
+	}
+	for _, option := range options {
+		if option.Source() == metadata.SourceType_pam_passphrase && option.UID() == uid {
+			return &ErrLoginProtectorExists{ctx.TargetUser}
+		}
+	}
+	return nil
+}
+
+// Protector represents an unlocked protector, so it contains the ProtectorData
+// as well as the actual protector key. These unlocked Protectors are necessary
+// to unlock policies and create new policies. As with the key struct, a
+// Protector should be wiped after use.
+type Protector struct {
+	Context         *Context
+	data            *metadata.ProtectorData
+	key             *crypto.Key
+	created         bool
+	ownerIfCreating *user.User
+}
+
+// CreateProtector creates an unlocked protector with a given name (name only
+// needed for custom and raw protector types). The keyFn provided to create the
+// Protector key will only be called once. If an error is returned, no data has
+// been changed on the filesystem.
+func CreateProtector(ctx *Context, name string, keyFn KeyFunc, owner *user.User) (*Protector, error) {
+	if err := ctx.checkContext(); err != nil {
+		return nil, err
+	}
+	// Sanity checks for names
+	if ctx.Config.Source == metadata.SourceType_pam_passphrase {
+		// login protectors don't need a name (we use the username instead)
+		if name != "" {
+			return nil, &ErrLoginProtectorName{name, ctx.TargetUser}
+		}
+	} else {
+		// non-login protectors need a name (so we can distinguish between them)
+		if name == "" {
+			return nil, &ErrMissingProtectorName{ctx.Config.Source}
+		}
+		// we don't want to duplicate naming
+		if err := checkForProtectorWithName(ctx, name); err != nil {
+			return nil, err
+		}
+	}
+
+	var err error
+	protector := &Protector{
+		Context: ctx,
+		data: &metadata.ProtectorData{
+			Name:   name,
+			Source: ctx.Config.Source,
+		},
+		created:         true,
+		ownerIfCreating: owner,
+	}
+
+	// Extra data is needed for some SourceTypes
+	switch protector.data.Source {
+	case metadata.SourceType_pam_passphrase:
+		// As the pam passphrases are user specific, we also store the
+		// UID for this kind of source.
+		protector.data.Uid = int64(util.AtoiOrPanic(ctx.TargetUser.Uid))
+		// Make sure we aren't duplicating protectors
+		if err = checkIfUserHasLoginProtector(ctx, protector.data.Uid); err != nil {
+			return nil, err
+		}
+		fallthrough
+	case metadata.SourceType_custom_passphrase:
+		// Our passphrase sources need costs and a random salt.
+ if protector.data.Salt, err = crypto.NewRandomBuffer(metadata.SaltLen); err != nil { + return nil, err + } + + protector.data.Costs = ctx.Config.HashCosts + } + + // Randomly create the underlying protector key (and wipe if we fail) + if protector.key, err = crypto.NewRandomKey(metadata.InternalKeyLen); err != nil { + return nil, err + } + protector.data.ProtectorDescriptor, err = crypto.ComputeKeyDescriptor(protector.key, 1) + if err != nil { + protector.Lock() + return nil, err + } + + if err = protector.Rewrap(keyFn); err != nil { + protector.Lock() + return nil, err + } + + return protector, nil +} + +// GetProtector retrieves a Protector with a specific descriptor. The Protector +// is still locked in this case, so it must be unlocked before using certain +// methods. +func GetProtector(ctx *Context, descriptor string) (*Protector, error) { + log.Printf("Getting protector %s", descriptor) + err := ctx.checkContext() + if err != nil { + return nil, err + } + + protector := &Protector{Context: ctx} + protector.data, err = ctx.Mount.GetRegularProtector(descriptor, ctx.TrustedUser) + return protector, err +} + +// GetProtectorFromOption retrieves a protector based on a protector option. +// If the option had a load error, this function returns that error. The +// Protector is still locked in this case, so it must be unlocked before using +// certain methods. +func GetProtectorFromOption(ctx *Context, option *ProtectorOption) (*Protector, error) { + log.Printf("Getting protector %s from option", option.Descriptor()) + if err := ctx.checkContext(); err != nil { + return nil, err + } + if option.LoadError != nil { + return nil, option.LoadError + } + + // Replace the context if this is a linked protector + if option.LinkedMount != nil { + ctx = &Context{ctx.Config, option.LinkedMount, ctx.TargetUser, ctx.TrustedUser} + } + return &Protector{Context: ctx, data: option.data}, nil +} + +// Descriptor returns the protector descriptor. +func (protector *Protector) Descriptor() string { + return protector.data.ProtectorDescriptor +} + +// Destroy removes a protector from the filesystem. The internal key should +// still be wiped with Lock(). +func (protector *Protector) Destroy() error { + return protector.Context.Mount.RemoveProtector(protector.Descriptor()) +} + +// Revert destroys a protector if it was created, but does nothing if it was +// just queried from the filesystem. +func (protector *Protector) Revert() error { + if !protector.created { + return nil + } + return protector.Destroy() +} + +func (protector *Protector) String() string { + return fmt.Sprintf("Protector: %s\nMountpoint: %s\nSource: %s\nName: %s\nCosts: %v\nUID: %d", + protector.Descriptor(), protector.Context.Mount, protector.data.Source, + protector.data.Name, protector.data.Costs, protector.data.Uid) +} + +// Unlock unwraps the Protector's internal key. The keyFn provided to unwrap the +// Protector key will be retried as necessary to get the correct key. Lock() +// should be called after use. Does nothing if protector is already unlocked. +func (protector *Protector) Unlock(keyFn KeyFunc) (err error) { + if protector.key != nil { + return + } + protector.key, err = unwrapProtectorKey(ProtectorInfo{protector.data}, keyFn) + return +} + +// Lock wipes a Protector's internal Key. It should always be called after using +// an unlocked Protector. This is often done with a defer statement. There is +// no effect if called multiple times. 
+func (protector *Protector) Lock() error { + err := protector.key.Wipe() + protector.key = nil + return err +} + +// Rewrap updates the data that is wrapping the Protector Key. This is useful if +// a user's password has changed, for example. The keyFn provided to rewrap +// the Protector key will only be called once. Requires unlocked Protector. +func (protector *Protector) Rewrap(keyFn KeyFunc) error { + if protector.key == nil { + return ErrLocked + } + wrappingKey, err := getWrappingKey(ProtectorInfo{protector.data}, keyFn, false) + if err != nil { + return err + } + + // Revert change to wrapped key on failure + oldWrappedKey := protector.data.WrappedKey + defer func() { + wrappingKey.Wipe() + if err != nil { + protector.data.WrappedKey = oldWrappedKey + } + }() + + if protector.data.WrappedKey, err = crypto.Wrap(wrappingKey, protector.key); err != nil { + return err + } + + return protector.Context.Mount.AddProtector(protector.data, protector.ownerIfCreating) +} diff --git a/vendor/github.com/google/fscrypt/actions/recovery.go b/vendor/github.com/google/fscrypt/actions/recovery.go new file mode 100644 index 00000000000..8a769cc7ee6 --- /dev/null +++ b/vendor/github.com/google/fscrypt/actions/recovery.go @@ -0,0 +1,131 @@ +/* + * recovery.go - support for generating recovery passphrases + * + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package actions + +import ( + "fmt" + "os" + "strconv" + + "github.com/google/fscrypt/crypto" + "github.com/google/fscrypt/metadata" + "github.com/google/fscrypt/util" +) + +// modifiedContextWithSource returns a copy of ctx with the protector source +// replaced by source. +func modifiedContextWithSource(ctx *Context, source metadata.SourceType) *Context { + modifiedConfig := *ctx.Config + modifiedConfig.Source = source + modifiedCtx := *ctx + modifiedCtx.Config = &modifiedConfig + return &modifiedCtx +} + +// AddRecoveryPassphrase randomly generates a recovery passphrase and adds it as +// a custom_passphrase protector for the given Policy. +func AddRecoveryPassphrase(policy *Policy, dirname string) (*crypto.Key, *Protector, error) { + // 20 random characters in a-z is 94 bits of entropy, which is way more + // than enough for a passphrase which still goes through the usual + // passphrase hashing which makes it extremely costly to brute force. + passphrase, err := crypto.NewRandomPassphrase(20) + if err != nil { + return nil, nil, err + } + defer func() { + if err != nil { + passphrase.Wipe() + } + }() + getPassphraseFn := func(info ProtectorInfo, retry bool) (*crypto.Key, error) { + // CreateProtector() wipes the passphrase, but in this case we + // still need it for later, so make a copy. + return passphrase.Clone() + } + var recoveryProtector *Protector + customCtx := modifiedContextWithSource(policy.Context, metadata.SourceType_custom_passphrase) + seq := 1 + for { + // Automatically generate a name for the recovery protector. 
+ name := "Recovery passphrase for " + dirname + if seq != 1 { + name += " (" + strconv.Itoa(seq) + ")" + } + recoveryProtector, err = CreateProtector(customCtx, name, getPassphraseFn, policy.ownerIfCreating) + if err == nil { + break + } + if _, ok := err.(*ErrProtectorNameExists); !ok { + return nil, nil, err + } + seq++ + } + if err := policy.AddProtector(recoveryProtector); err != nil { + recoveryProtector.Revert() + return nil, nil, err + } + return passphrase, recoveryProtector, nil +} + +// WriteRecoveryInstructions writes a recovery passphrase and instructions to a +// file. This file should initially be located in the encrypted directory +// protected by the passphrase itself. It's up to the user to store the +// passphrase in a different location if they actually need it. +func WriteRecoveryInstructions(recoveryPassphrase *crypto.Key, recoveryProtector *Protector, + policy *Policy, path string) error { + file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0600) + if err != nil { + return err + } + defer file.Close() + str := fmt.Sprintf( + `fscrypt automatically generated a recovery passphrase for this directory: + + %s + +It did this because you chose to protect this directory with your login +passphrase, but this directory is not on the root filesystem. + +Copy this passphrase to a safe place if you want to still be able to unlock this +directory if you re-install the operating system or connect this storage media +to a different system (which would result in your login protector being lost). + +To unlock this directory using this recovery passphrase, run 'fscrypt unlock' +and select the protector named %q. + +If you want to disable recovery passphrase generation (not recommended), +re-create this directory and pass the --no-recovery option to 'fscrypt encrypt'. +Alternatively, you can remove this recovery passphrase protector using: + + fscrypt metadata remove-protector-from-policy --force --protector=%s:%s --policy=%s:%s + +It is safe to keep it around though, as the recovery passphrase is high-entropy. +`, recoveryPassphrase.Data(), recoveryProtector.data.Name, + recoveryProtector.Context.Mount.Path, recoveryProtector.data.ProtectorDescriptor, + policy.Context.Mount.Path, policy.data.KeyDescriptor) + if _, err = file.WriteString(str); err != nil { + return err + } + if recoveryProtector.ownerIfCreating != nil { + if err = util.Chown(file, recoveryProtector.ownerIfCreating); err != nil { + return err + } + } + return file.Sync() +} diff --git a/vendor/github.com/google/fscrypt/crypto/crypto.go b/vendor/github.com/google/fscrypt/crypto/crypto.go new file mode 100644 index 00000000000..1f64b38bbd5 --- /dev/null +++ b/vendor/github.com/google/fscrypt/crypto/crypto.go @@ -0,0 +1,228 @@ +/* + * crypto.go - Cryptographic algorithms used by the rest of fscrypt. + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +// Package crypto manages all the cryptography for fscrypt. 
This includes: +// - Key management (key.go) +// - Securely holding keys in memory +// - Making recovery keys +// - Randomness (rand.go) +// - Cryptographic algorithms (crypto.go) +// - encryption (AES256-CTR) +// - authentication (SHA256-based HMAC) +// - key stretching (SHA256-based HKDF) +// - key wrapping/unwrapping (Encrypt then MAC) +// - passphrase-based key derivation (Argon2id) +// - key descriptor computation (double SHA512, or HKDF-SHA512) +package crypto + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/hmac" + "crypto/sha256" + "crypto/sha512" + "encoding/hex" + "io" + + "github.com/pkg/errors" + "golang.org/x/crypto/argon2" + "golang.org/x/crypto/hkdf" + + "github.com/google/fscrypt/metadata" + "github.com/google/fscrypt/util" +) + +// Crypto error values +var ( + ErrBadAuth = errors.New("key authentication check failed") + ErrRecoveryCode = errors.New("invalid recovery code") + ErrMlockUlimit = errors.New("could not lock key in memory") +) + +// panicInputLength panics if "name" has invalid length (expected != actual) +func panicInputLength(name string, expected, actual int) { + if err := util.CheckValidLength(expected, actual); err != nil { + panic(errors.Wrap(err, name)) + } +} + +// checkWrappingKey returns an error if the wrapping key has the wrong length +func checkWrappingKey(wrappingKey *Key) error { + err := util.CheckValidLength(metadata.InternalKeyLen, wrappingKey.Len()) + return errors.Wrap(err, "wrapping key") +} + +// stretchKey stretches a key of length InternalKeyLen using unsalted HKDF to +// make two keys of length InternalKeyLen. +func stretchKey(key *Key) (encKey, authKey *Key) { + panicInputLength("hkdf key", metadata.InternalKeyLen, key.Len()) + + // The new hkdf function uses the hash and key to create a reader that + // can be used to securely initialize multiple keys. This means that + // reads on the hkdf give independent cryptographic keys. The hkdf will + // also always have enough entropy to read two keys. + hkdf := hkdf.New(sha256.New, key.data, nil, nil) + + encKey, err := NewFixedLengthKeyFromReader(hkdf, metadata.InternalKeyLen) + util.NeverError(err) + authKey, err = NewFixedLengthKeyFromReader(hkdf, metadata.InternalKeyLen) + util.NeverError(err) + + return +} + +// aesCTR runs AES256-CTR on the input using the provided key and iv. This +// function can be used to either encrypt or decrypt input of any size. Note +// that input and output must be the same size. +func aesCTR(key *Key, iv, input, output []byte) { + panicInputLength("aesCTR key", metadata.InternalKeyLen, key.Len()) + panicInputLength("aesCTR iv", metadata.IVLen, len(iv)) + panicInputLength("aesCTR output", len(input), len(output)) + + blockCipher, err := aes.NewCipher(key.data) + util.NeverError(err) // Key is checked to have correct length + + stream := cipher.NewCTR(blockCipher, iv) + stream.XORKeyStream(output, input) +} + +// getHMAC returns the SHA256-based HMAC of some data using the provided key. +func getHMAC(key *Key, data ...[]byte) []byte { + panicInputLength("hmac key", metadata.InternalKeyLen, key.Len()) + + mac := hmac.New(sha256.New, key.data) + for _, buffer := range data { + // SHA256 HMAC should never be unable to write the data + _, err := mac.Write(buffer) + util.NeverError(err) + } + + return mac.Sum(nil) +} + +// Wrap takes a wrapping Key of length InternalKeyLen, and uses it to wrap a +// secret Key of any length. This wrapping uses a random IV, the encrypted data, +// and an HMAC to verify the wrapping key was correct. 
All of this is included +// in the returned WrappedKeyData structure. +func Wrap(wrappingKey, secretKey *Key) (*metadata.WrappedKeyData, error) { + if err := checkWrappingKey(wrappingKey); err != nil { + return nil, err + } + + data := &metadata.WrappedKeyData{EncryptedKey: make([]byte, secretKey.Len())} + + // Get random IV + var err error + if data.IV, err = NewRandomBuffer(metadata.IVLen); err != nil { + return nil, err + } + + // Stretch key for encryption and authentication (unsalted). + encKey, authKey := stretchKey(wrappingKey) + defer encKey.Wipe() + defer authKey.Wipe() + + // Encrypt the secret and include the HMAC of the output ("Encrypt-then-MAC"). + aesCTR(encKey, data.IV, secretKey.data, data.EncryptedKey) + + data.Hmac = getHMAC(authKey, data.IV, data.EncryptedKey) + return data, nil +} + +// Unwrap takes a wrapping Key of length InternalKeyLen, and uses it to unwrap +// the WrappedKeyData to get the unwrapped secret Key. The Wrapped Key data +// includes an authentication check, so an error will be returned if that check +// fails. +func Unwrap(wrappingKey *Key, data *metadata.WrappedKeyData) (*Key, error) { + if err := checkWrappingKey(wrappingKey); err != nil { + return nil, err + } + + // Stretch key for encryption and authentication (unsalted). + encKey, authKey := stretchKey(wrappingKey) + defer encKey.Wipe() + defer authKey.Wipe() + + // Check validity of the HMAC + if !hmac.Equal(getHMAC(authKey, data.IV, data.EncryptedKey), data.Hmac) { + return nil, ErrBadAuth + } + + secretKey, err := NewBlankKey(len(data.EncryptedKey)) + if err != nil { + return nil, err + } + aesCTR(encKey, data.IV, data.EncryptedKey, secretKey.data) + + return secretKey, nil +} + +func computeKeyDescriptorV1(key *Key) string { + h1 := sha512.Sum512(key.data) + h2 := sha512.Sum512(h1[:]) + length := hex.DecodedLen(metadata.PolicyDescriptorLenV1) + return hex.EncodeToString(h2[:length]) +} + +func computeKeyDescriptorV2(key *Key) (string, error) { + // This algorithm is specified by the kernel. It uses unsalted + // HKDF-SHA512, where the application-information string is the prefix + // "fscrypt\0" followed by the HKDF_CONTEXT_KEY_IDENTIFIER byte. + hkdf := hkdf.New(sha512.New, key.data, nil, []byte("fscrypt\x00\x01")) + h := make([]byte, hex.DecodedLen(metadata.PolicyDescriptorLenV2)) + if _, err := io.ReadFull(hkdf, h); err != nil { + return "", err + } + return hex.EncodeToString(h), nil +} + +// ComputeKeyDescriptor computes the descriptor for a given cryptographic key. +// If policyVersion=1, it uses the first 8 bytes of the double application of +// SHA512 on the key. Use this for protectors and v1 policy keys. +// If policyVersion=2, it uses HKDF-SHA512 to compute a key identifier that's +// compatible with the kernel's key identifiers for v2 policy keys. +// In both cases, the resulting bytes are formatted as hex. +func ComputeKeyDescriptor(key *Key, policyVersion int64) (string, error) { + switch policyVersion { + case 1: + return computeKeyDescriptorV1(key), nil + case 2: + return computeKeyDescriptorV2(key) + default: + return "", errors.Errorf("policy version of %d is invalid", policyVersion) + } +} + +// PassphraseHash uses Argon2id to produce a Key given the passphrase, salt, and +// hashing costs. This method is designed to take a long time and consume +// considerable memory. For more information, see the documentation at +// https://godoc.org/golang.org/x/crypto/argon2. 
+func PassphraseHash(passphrase *Key, salt []byte, costs *metadata.HashingCosts) (*Key, error) {
+	t := uint32(costs.Time)
+	m := uint32(costs.Memory)
+	p := uint8(costs.Parallelism)
+	key := argon2.IDKey(passphrase.data, salt, t, m, p, metadata.InternalKeyLen)
+
+	hash, err := NewBlankKey(metadata.InternalKeyLen)
+	if err != nil {
+		return nil, err
+	}
+	copy(hash.data, key)
+	return hash, nil
+}
diff --git a/vendor/github.com/google/fscrypt/crypto/key.go b/vendor/github.com/google/fscrypt/crypto/key.go
new file mode 100644
index 00000000000..2e5744336de
--- /dev/null
+++ b/vendor/github.com/google/fscrypt/crypto/key.go
@@ -0,0 +1,354 @@
+/*
+ * key.go - Cryptographic key management for fscrypt. Ensures that sensitive
+ * material is properly handled throughout the program.
+ *
+ * Copyright 2017 Google Inc.
+ * Author: Joe Richey (joerichey@google.com)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package crypto
+
+/*
+#include <stdlib.h>
+#include <string.h>
+*/
+import "C"
+
+import (
+	"bytes"
+	"crypto/subtle"
+	"encoding/base32"
+	"io"
+	"log"
+	"os"
+	"runtime"
+	"unsafe"
+
+	"github.com/pkg/errors"
+	"golang.org/x/sys/unix"
+
+	"github.com/google/fscrypt/metadata"
+	"github.com/google/fscrypt/util"
+)
+
+const (
+	// Keys need to be readable and writable, but hidden from other processes.
+	keyProtection = unix.PROT_READ | unix.PROT_WRITE
+	keyMmapFlags  = unix.MAP_PRIVATE | unix.MAP_ANONYMOUS
+)
+
+/*
+UseMlock determines whether we should use the mlock/munlock syscalls to
+prevent sensitive data like keys and passphrases from being paged to disk.
+UseMlock defaults to true, but can be set to false if the application calling
+into this library has insufficient privileges to lock memory. Code using this
+package could also bind this setting to a flag by using:
+
+	flag.BoolVar(&crypto.UseMlock, "lock-memory", true, "lock keys in memory")
+*/
+var UseMlock = true
+
+/*
+Key protects some arbitrary buffer of cryptographic material. Its methods
+ensure that the Key's data is locked in memory before being used (if
+UseMlock is set to true), and is wiped and unlocked after use (via the Wipe()
+method). This data is never accessed outside of the fscrypt/crypto package
+(except for the UnsafeData method). If a key is successfully created, the
+Wipe() method should be called after its use. For example:
+
+	func UseKeyFromStdin() error {
+		key, err := NewKeyFromReader(os.Stdin)
+		if err != nil {
+			return err
+		}
+		defer key.Wipe()
+
+		// Do stuff with key
+
+		return nil
+	}
+
+The Wipe() method will also be called when a key is garbage collected; however,
+it is best practice to clear the key as soon as possible, so it spends a minimal
+amount of time in memory.
+
+Note that Key is not thread safe, as a key could be wiped while another thread
+is using it. Also, calling Wipe() from two threads could cause an error as
+memory could be freed twice.
+*/ +type Key struct { + data []byte +} + +// NewBlankKey constructs a blank key of a specified length and returns an error +// if we are unable to allocate or lock the necessary memory. +func NewBlankKey(length int) (*Key, error) { + if length == 0 { + return &Key{data: nil}, nil + } else if length < 0 { + return nil, errors.Errorf("requested key length %d is negative", length) + } + + flags := keyMmapFlags + if UseMlock { + flags |= unix.MAP_LOCKED + } + + // See MAP_ANONYMOUS in http://man7.org/linux/man-pages/man2/mmap.2.html + data, err := unix.Mmap(-1, 0, length, keyProtection, flags) + if err == unix.EAGAIN { + return nil, ErrMlockUlimit + } + if err != nil { + return nil, errors.Wrapf(err, + "failed to allocate (mmap) key buffer of length %d", length) + } + + key := &Key{data: data} + + // Backup finalizer in case user forgets to "defer key.Wipe()" + runtime.SetFinalizer(key, (*Key).Wipe) + return key, nil +} + +// Wipe destroys a Key by zeroing and freeing the memory. The data is zeroed +// even if Wipe returns an error, which occurs if we are unable to unlock or +// free the key memory. Wipe does nothing if the key is already wiped or is nil. +func (key *Key) Wipe() error { + // We do nothing if key or key.data is nil so that Wipe() is idempotent + // and so Wipe() can be called on keys which have already been cleared. + if key != nil && key.data != nil { + data := key.data + key.data = nil + + for i := range data { + data[i] = 0 + } + + if err := unix.Munmap(data); err != nil { + log.Printf("unix.Munmap() failed: %v", err) + return errors.Wrapf(err, "failed to free (munmap) key buffer") + } + } + return nil +} + +// Len is the underlying data buffer's length. +func (key *Key) Len() int { + return len(key.data) +} + +// Equals compares the contents of two keys, returning true if they have the same +// key data. This function runs in constant time. +func (key *Key) Equals(key2 *Key) bool { + return subtle.ConstantTimeCompare(key.data, key2.data) == 1 +} + +// resize returns a new key with size requestedSize and the appropriate data +// copied over. The original data is wiped. This method does nothing and returns +// itself if the key's length equals requestedSize. +func (key *Key) resize(requestedSize int) (*Key, error) { + if key.Len() == requestedSize { + return key, nil + } + defer key.Wipe() + + resizedKey, err := NewBlankKey(requestedSize) + if err != nil { + return nil, err + } + copy(resizedKey.data, key.data) + return resizedKey, nil +} + +// Data returns a slice of the key's underlying data. Note that this may become +// outdated if the key is resized. +func (key *Key) Data() []byte { + return key.data +} + +// UnsafePtr returns an unsafe pointer to the key's underlying data. Note that +// this will only be valid as long as the key is not resized. +func (key *Key) UnsafePtr() unsafe.Pointer { + return util.Ptr(key.data) +} + +// UnsafeToCString makes a copy of the string's data into a null-terminated C +// string allocated by C. Note that this method is unsafe as this C copy has no +// locking or wiping functionality. The key shouldn't contain any `\0` bytes. +func (key *Key) UnsafeToCString() unsafe.Pointer { + size := C.size_t(key.Len()) + data := C.calloc(size+1, 1) + C.memcpy(data, util.Ptr(key.data), size) + return data +} + +// Clone creates a key as a copy of another one. 
+func (key *Key) Clone() (*Key, error) {
+	newKey, err := NewBlankKey(key.Len())
+	if err != nil {
+		return nil, err
+	}
+	copy(newKey.data, key.data)
+	return newKey, nil
+}
+
+// NewKeyFromCString creates a copy of some C string's data in a key. Note
+// that the original C string is not modified at all, so steps must be taken to
+// ensure that this original copy is secured.
+func NewKeyFromCString(str unsafe.Pointer) (*Key, error) {
+	size := C.strlen((*C.char)(str))
+	key, err := NewBlankKey(int(size))
+	if err != nil {
+		return nil, err
+	}
+	C.memcpy(util.Ptr(key.data), str, size)
+	return key, nil
+}
+
+// NewKeyFromReader constructs a key of arbitrary length by reading from reader
+// until hitting EOF.
+func NewKeyFromReader(reader io.Reader) (*Key, error) {
+	// Use an initial key size of a page. As Mmap allocates a page anyway,
+	// there isn't much additional overhead from starting with a whole page.
+	key, err := NewBlankKey(os.Getpagesize())
+	if err != nil {
+		return nil, err
+	}
+
+	totalBytesRead := 0
+	for {
+		bytesRead, err := reader.Read(key.data[totalBytesRead:])
+		totalBytesRead += bytesRead
+
+		switch err {
+		case nil:
+			// Need to continue reading. Grow key if necessary
+			if key.Len() == totalBytesRead {
+				if key, err = key.resize(2 * key.Len()); err != nil {
+					return nil, err
+				}
+			}
+		case io.EOF:
+			// Getting the EOF error means we are done
+			return key.resize(totalBytesRead)
+		default:
+			// Fail if Read() has a failure
+			key.Wipe()
+			return nil, err
+		}
+	}
+}
+
+// NewFixedLengthKeyFromReader constructs a key with a specified length by
+// reading exactly length bytes from reader.
+func NewFixedLengthKeyFromReader(reader io.Reader, length int) (*Key, error) {
+	key, err := NewBlankKey(length)
+	if err != nil {
+		return nil, err
+	}
+	if _, err := io.ReadFull(reader, key.data); err != nil {
+		key.Wipe()
+		return nil, err
+	}
+	return key, nil
+}
+
+var (
+	// The recovery code is base32 with a dash between each block of 8 characters.
+	encoding      = base32.StdEncoding
+	blockSize     = 8
+	separator     = []byte("-")
+	encodedLength = encoding.EncodedLen(metadata.PolicyKeyLen)
+	decodedLength = encoding.DecodedLen(encodedLength)
+	// RecoveryCodeLength is the number of bytes in every recovery code
+	RecoveryCodeLength = (encodedLength/blockSize)*(blockSize+len(separator)) - len(separator)
+)
+
+// WriteRecoveryCode outputs key's recovery code to the provided writer.
+// WARNING: This recovery key is enough to derive the original key, so it must
+// be given the same level of protection as a raw cryptographic key.
+func WriteRecoveryCode(key *Key, writer io.Writer) error {
+	if err := util.CheckValidLength(metadata.PolicyKeyLen, key.Len()); err != nil {
+		return errors.Wrap(err, "recovery key")
+	}
+
+	// We store the base32 encoded data (without separators) in a temp key
+	encodedKey, err := NewBlankKey(encodedLength)
+	if err != nil {
+		return err
+	}
+	defer encodedKey.Wipe()
+	encoding.Encode(encodedKey.data, key.data)
+
+	w := util.NewErrWriter(writer)
+
+	// Write the blocks with separators between them
+	w.Write(encodedKey.data[:blockSize])
+	for blockStart := blockSize; blockStart < encodedLength; blockStart += blockSize {
+		w.Write(separator)
+
+		blockEnd := util.MinInt(blockStart+blockSize, encodedLength)
+		w.Write(encodedKey.data[blockStart:blockEnd])
+	}
+
+	// If any writes have failed, return the error
+	return w.Err()
+}
+
+// ReadRecoveryCode gets the recovery code from the provided reader and returns
+// the corresponding cryptographic key.
+// WARNING: This recovery key is enough to derive the original key, so it must
+// be given the same level of protection as a raw cryptographic key.
+func ReadRecoveryCode(reader io.Reader) (*Key, error) {
+	// We store the base32 encoded data (without separators) in a temp key
+	encodedKey, err := NewBlankKey(encodedLength)
+	if err != nil {
+		return nil, err
+	}
+	defer encodedKey.Wipe()
+
+	r := util.NewErrReader(reader)
+
+	// Read the other blocks, checking the separators between them
+	r.Read(encodedKey.data[:blockSize])
+	inputSeparator := make([]byte, len(separator))
+
+	for blockStart := blockSize; blockStart < encodedLength; blockStart += blockSize {
+		r.Read(inputSeparator)
+		if r.Err() == nil && !bytes.Equal(separator, inputSeparator) {
+			err = errors.Wrapf(ErrRecoveryCode, "invalid separator %q", inputSeparator)
+			return nil, err
+		}
+
+		blockEnd := util.MinInt(blockStart+blockSize, encodedLength)
+		r.Read(encodedKey.data[blockStart:blockEnd])
+	}
+
+	// If any reads have failed, return the error
+	if r.Err() != nil {
+		return nil, errors.Wrapf(ErrRecoveryCode, "read error %v", r.Err())
+	}
+
+	// Now we decode the key, resizing if necessary
+	decodedKey, err := NewBlankKey(decodedLength)
+	if err != nil {
+		return nil, err
+	}
+	if _, err = encoding.Decode(decodedKey.data, encodedKey.data); err != nil {
+		return nil, errors.Wrap(ErrRecoveryCode, err.Error())
+	}
+	return decodedKey.resize(metadata.PolicyKeyLen)
+}
diff --git a/vendor/github.com/google/fscrypt/crypto/rand.go b/vendor/github.com/google/fscrypt/crypto/rand.go
new file mode 100644
index 00000000000..7d1e55bf03b
--- /dev/null
+++ b/vendor/github.com/google/fscrypt/crypto/rand.go
@@ -0,0 +1,98 @@
+/*
+ * rand.go - Reader used to generate secure random data for fscrypt.
+ *
+ * Copyright 2017 Google Inc.
+ * Author: Joe Richey (joerichey@google.com)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package crypto
+
+import (
+	"io"
+
+	"github.com/pkg/errors"
+	"golang.org/x/sys/unix"
+)
+
+// NewRandomBuffer uses the Linux Getrandom() syscall to create random bytes. If
+// the operating system has insufficient randomness, the buffer creation will
+// fail. This is an improvement over Go's built-in crypto/rand which will still
+// return bytes if the system has insufficient entropy.
+// See: https://github.com/golang/go/issues/19274
+//
+// While this syscall was only introduced in Kernel v3.17, it predates the
+// introduction of filesystem encryption, so it introduces no additional
+// compatibility issues.
+func NewRandomBuffer(length int) ([]byte, error) {
+	buffer := make([]byte, length)
+	if _, err := io.ReadFull(randReader{}, buffer); err != nil {
+		return nil, err
+	}
+	return buffer, nil
+}
+
+// NewRandomKey creates a random key of the specified length. This function uses
+// the same random number generation process as NewRandomBuffer.
+func NewRandomKey(length int) (*Key, error) { + return NewFixedLengthKeyFromReader(randReader{}, length) +} + +// NewRandomPassphrase creates a random passphrase of the specified length +// containing random alphabetic characters. +func NewRandomPassphrase(length int) (*Key, error) { + chars := []byte("abcdefghijklmnopqrstuvwxyz") + passphrase, err := NewBlankKey(length) + if err != nil { + return nil, err + } + for i := 0; i < length; { + // Get some random bytes. + raw, err := NewRandomKey((length - i) * 2) + if err != nil { + return nil, err + } + // Translate the random bytes into random characters. + for _, b := range raw.data { + if int(b) >= 256-(256%len(chars)) { + // Avoid bias towards the first characters in the list. + continue + } + c := chars[int(b)%len(chars)] + passphrase.data[i] = c + i++ + if i == length { + break + } + } + raw.Wipe() + } + return passphrase, nil +} + +// randReader just calls into Getrandom, so no internal data is needed. +type randReader struct{} + +func (r randReader) Read(buffer []byte) (int, error) { + n, err := unix.Getrandom(buffer, unix.GRND_NONBLOCK) + switch err { + case nil: + return n, nil + case unix.EAGAIN: + err = errors.New("insufficient entropy in pool") + case unix.ENOSYS: + err = errors.New("kernel must be v3.17 or later") + } + return 0, errors.Wrap(err, "getrandom() failed") +} diff --git a/vendor/github.com/google/fscrypt/filesystem/filesystem.go b/vendor/github.com/google/fscrypt/filesystem/filesystem.go new file mode 100644 index 00000000000..27bfa241565 --- /dev/null +++ b/vendor/github.com/google/fscrypt/filesystem/filesystem.go @@ -0,0 +1,1088 @@ +/* + * filesystem.go - Contains the functionality for a specific filesystem. This + * includes the commands to setup the filesystem, apply policies, and locate + * metadata. + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +// Package filesystem deals with the structure of the files on disk used to +// store the metadata for fscrypt. Specifically, this package includes: +// - mountpoint management (mountpoint.go) +// - querying existing mounted filesystems +// - getting filesystems from a UUID +// - finding the filesystem for a specific path +// - metadata organization (filesystem.go) +// - setting up a mounted filesystem for use with fscrypt +// - adding/querying/deleting metadata +// - making links to other filesystems' metadata +// - following links to get data from other filesystems +package filesystem + +import ( + "fmt" + "io" + "io/ioutil" + "log" + "os" + "os/user" + "path/filepath" + "sort" + "strings" + "syscall" + "time" + + "github.com/golang/protobuf/proto" + "github.com/pkg/errors" + "golang.org/x/sys/unix" + + "github.com/google/fscrypt/metadata" + "github.com/google/fscrypt/util" +) + +// ErrAlreadySetup indicates that a filesystem is already setup for fscrypt. 
+type ErrAlreadySetup struct { + Mount *Mount +} + +func (err *ErrAlreadySetup) Error() string { + return fmt.Sprintf("filesystem %s is already setup for use with fscrypt", + err.Mount.Path) +} + +// ErrCorruptMetadata indicates that an fscrypt metadata file is corrupt. +type ErrCorruptMetadata struct { + Path string + UnderlyingError error +} + +func (err *ErrCorruptMetadata) Error() string { + return fmt.Sprintf("fscrypt metadata file at %q is corrupt: %s", + err.Path, err.UnderlyingError) +} + +// ErrFollowLink indicates that a protector link can't be followed. +type ErrFollowLink struct { + Link string + UnderlyingError error +} + +func (err *ErrFollowLink) Error() string { + return fmt.Sprintf("cannot follow filesystem link %q: %s", + err.Link, err.UnderlyingError) +} + +// ErrInsecurePermissions indicates that a filesystem is not considered to be +// setup for fscrypt because a metadata directory has insecure permissions. +type ErrInsecurePermissions struct { + Path string +} + +func (err *ErrInsecurePermissions) Error() string { + return fmt.Sprintf("%q has insecure permissions (world-writable without sticky bit)", + err.Path) +} + +// ErrMakeLink indicates that a protector link can't be created. +type ErrMakeLink struct { + Target *Mount + UnderlyingError error +} + +func (err *ErrMakeLink) Error() string { + return fmt.Sprintf("cannot create filesystem link to %q: %s", + err.Target.Path, err.UnderlyingError) +} + +// ErrMountOwnedByAnotherUser indicates that the mountpoint root directory is +// owned by a user that isn't trusted in the current context, so we don't +// consider fscrypt to be properly setup on the filesystem. +type ErrMountOwnedByAnotherUser struct { + Mount *Mount +} + +func (err *ErrMountOwnedByAnotherUser) Error() string { + return fmt.Sprintf("another non-root user owns the root directory of %s", err.Mount.Path) +} + +// ErrNoCreatePermission indicates that the current user lacks permission to +// create fscrypt metadata on the given filesystem. +type ErrNoCreatePermission struct { + Mount *Mount +} + +func (err *ErrNoCreatePermission) Error() string { + return fmt.Sprintf("user lacks permission to create fscrypt metadata on %s", err.Mount.Path) +} + +// ErrNotAMountpoint indicates that a path is not a mountpoint. +type ErrNotAMountpoint struct { + Path string +} + +func (err *ErrNotAMountpoint) Error() string { + return fmt.Sprintf("%q is not a mountpoint", err.Path) +} + +// ErrNotSetup indicates that a filesystem is not setup for fscrypt. +type ErrNotSetup struct { + Mount *Mount +} + +func (err *ErrNotSetup) Error() string { + return fmt.Sprintf("filesystem %s is not setup for use with fscrypt", err.Mount.Path) +} + +// ErrSetupByAnotherUser indicates that one or more of the fscrypt metadata +// directories is owned by a user that isn't trusted in the current context, so +// we don't consider fscrypt to be properly setup on the filesystem. +type ErrSetupByAnotherUser struct { + Mount *Mount +} + +func (err *ErrSetupByAnotherUser) Error() string { + return fmt.Sprintf("another non-root user owns fscrypt metadata directories on %s", err.Mount.Path) +} + +// ErrSetupNotSupported indicates that the given filesystem type is not +// supported for fscrypt setup. +type ErrSetupNotSupported struct { + Mount *Mount +} + +func (err *ErrSetupNotSupported) Error() string { + return fmt.Sprintf("filesystem type %s is not supported for fscrypt setup", + err.Mount.FilesystemType) +} + +// ErrPolicyNotFound indicates that the policy metadata was not found. 
+type ErrPolicyNotFound struct { + Descriptor string + Mount *Mount +} + +func (err *ErrPolicyNotFound) Error() string { + return fmt.Sprintf("policy metadata for %s not found on filesystem %s", + err.Descriptor, err.Mount.Path) +} + +// ErrProtectorNotFound indicates that the protector metadata was not found. +type ErrProtectorNotFound struct { + Descriptor string + Mount *Mount +} + +func (err *ErrProtectorNotFound) Error() string { + return fmt.Sprintf("protector metadata for %s not found on filesystem %s", + err.Descriptor, err.Mount.Path) +} + +// SortDescriptorsByLastMtime indicates whether descriptors are sorted by last +// modification time when being listed. This can be set to true to get +// consistent output for testing. +var SortDescriptorsByLastMtime = false + +// Mount contains information for a specific mounted filesystem. +// Path - Absolute path where the directory is mounted +// FilesystemType - Type of the mounted filesystem, e.g. "ext4" +// Device - Device for filesystem (empty string if we cannot find one) +// DeviceNumber - Device number of the filesystem. This is set even if +// Device isn't, since all filesystems have a device +// number assigned by the kernel, even pseudo-filesystems. +// Subtree - The mounted subtree of the filesystem. This is usually +// "/", meaning that the entire filesystem is mounted, but +// it can differ for bind mounts. +// ReadOnly - True if this is a read-only mount +// +// In order to use a Mount to store fscrypt metadata, some directories must be +// setup first. Specifically, the directories created look like: +// +// └── .fscrypt +// ├── policies +// └── protectors +// +// These "policies" and "protectors" directories will contain files that are +// the corresponding metadata structures for policies and protectors. The public +// interface includes functions for setting up these directories and Adding, +// Getting, and Removing these files. +// +// There is also the ability to reference another filesystem's metadata. This is +// used when a Policy on filesystem A is protected with Protector on filesystem +// B. In this scenario, we store a "link file" in the protectors directory. +// +// We also allow ".fscrypt" to be a symlink which was previously created. This +// allows login protectors to be created when the root filesystem is read-only, +// provided that "/.fscrypt" is a symlink pointing to a writable location. +type Mount struct { + Path string + FilesystemType string + Device string + DeviceNumber DeviceNumber + Subtree string + ReadOnly bool +} + +// PathSorter allows mounts to be sorted by Path. +type PathSorter []*Mount + +func (p PathSorter) Len() int { return len(p) } +func (p PathSorter) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p PathSorter) Less(i, j int) bool { return p[i].Path < p[j].Path } + +const ( + // Names of the various directories used in fscrypt + baseDirName = ".fscrypt" + policyDirName = "policies" + protectorDirName = "protectors" + tempPrefix = ".tmp" + linkFileExtension = ".link" + + // The base directory should be read-only (except for the creator) + basePermissions = 0755 + + // The metadata files shouldn't be readable or writable by other users. + // Having them be world-readable wouldn't necessarily be a huge issue, + // but given that some of these files contain (strong) password hashes, + // we error on the side of caution -- similar to /etc/shadow. + // Note: existing files on-disk might have mode 0644, as that was the + // mode used by fscrypt v0.3.2 and earlier. 
+ filePermissions = os.FileMode(0600) + + // Maximum size of a metadata file. This value is arbitrary, and it can + // be changed. We just set a reasonable limit that shouldn't be reached + // in practice, except by users trying to cause havoc by creating + // extremely large files in the metadata directories. + maxMetadataFileSize = 16384 +) + +// SetupMode is a mode for creating the fscrypt metadata directories. +type SetupMode int + +const ( + // SingleUserWritable specifies to make the fscrypt metadata directories + // writable by a single user (usually root) only. + SingleUserWritable SetupMode = iota + // WorldWritable specifies to make the fscrypt metadata directories + // world-writable (with the sticky bit set). + WorldWritable +) + +func (m *Mount) String() string { + return fmt.Sprintf(`%s + FilesystemType: %s + Device: %s`, m.Path, m.FilesystemType, m.Device) +} + +// BaseDir returns the path to the base fscrypt directory for this filesystem. +func (m *Mount) BaseDir() string { + rawBaseDir := filepath.Join(m.Path, baseDirName) + // We allow the base directory to be a symlink, but some callers need + // the real path, so dereference the symlink here if needed. Since the + // directory the symlink points to may not exist yet, we have to read + // the symlink manually rather than use filepath.EvalSymlinks. + target, err := os.Readlink(rawBaseDir) + if err != nil { + return rawBaseDir // not a symlink + } + if filepath.IsAbs(target) { + return target + } + return filepath.Join(m.Path, target) +} + +// ProtectorDir returns the directory containing the protector metadata. +func (m *Mount) ProtectorDir() string { + return filepath.Join(m.BaseDir(), protectorDirName) +} + +// protectorPath returns the full path to a regular protector file with the +// specified descriptor. +func (m *Mount) protectorPath(descriptor string) string { + return filepath.Join(m.ProtectorDir(), descriptor) +} + +// linkedProtectorPath returns the full path to a linked protector file with the +// specified descriptor. +func (m *Mount) linkedProtectorPath(descriptor string) string { + return m.protectorPath(descriptor) + linkFileExtension +} + +// PolicyDir returns the directory containing the policy metadata. +func (m *Mount) PolicyDir() string { + return filepath.Join(m.BaseDir(), policyDirName) +} + +// PolicyPath returns the full path to a regular policy file with the +// specified descriptor. +func (m *Mount) PolicyPath(descriptor string) string { + return filepath.Join(m.PolicyDir(), descriptor) +} + +// tempMount creates a temporary directory alongside this Mount's base fscrypt +// directory and returns a temporary Mount which represents this temporary +// directory. The caller is responsible for removing this temporary directory. +func (m *Mount) tempMount() (*Mount, error) { + tempDir, err := ioutil.TempDir(filepath.Dir(m.BaseDir()), tempPrefix) + return &Mount{Path: tempDir}, err +} + +// ErrEncryptionNotEnabled indicates that encryption is not enabled on the given +// filesystem. +type ErrEncryptionNotEnabled struct { + Mount *Mount +} + +func (err *ErrEncryptionNotEnabled) Error() string { + return fmt.Sprintf("encryption not enabled on filesystem %s (%s).", + err.Mount.Path, err.Mount.Device) +} + +// ErrEncryptionNotSupported indicates that encryption is not supported on the +// given filesystem. 
+type ErrEncryptionNotSupported struct {
+	Mount *Mount
+}
+
+func (err *ErrEncryptionNotSupported) Error() string {
+	return fmt.Sprintf("This kernel doesn't support encryption on %s filesystems.",
+		err.Mount.FilesystemType)
+}
+
+// EncryptionSupportError adds filesystem-specific context to the
+// ErrEncryptionNotEnabled and ErrEncryptionNotSupported errors from the
+// metadata package.
+func (m *Mount) EncryptionSupportError(err error) error {
+	switch err {
+	case metadata.ErrEncryptionNotEnabled:
+		return &ErrEncryptionNotEnabled{m}
+	case metadata.ErrEncryptionNotSupported:
+		return &ErrEncryptionNotSupported{m}
+	}
+	return err
+}
+
+// isFscryptSetupAllowed decides whether the given filesystem is allowed to be
+// set up for fscrypt, without actually accessing it. This basically checks
+// whether the filesystem type is one of the types that supports encryption, or
+// at least is in some stage of planning for encryption support in the future.
+//
+// We need this list so that we can skip filesystems that are irrelevant for
+// fscrypt without having to look for the fscrypt metadata directories on them,
+// which can trigger errors, long delays, or side effects on some filesystems.
+//
+// Unfortunately, this means that if a completely new filesystem adds encryption
+// support, then it will need to be manually added to this list. But it seems
+// to be a worthwhile tradeoff to avoid the above issues.
+func (m *Mount) isFscryptSetupAllowed() bool {
+	if m.Path == "/" {
+		// The root filesystem is always allowed, since it's where login
+		// protectors are stored.
+		return true
+	}
+	switch m.FilesystemType {
+	case "ext4", "f2fs", "ubifs", "btrfs", "ceph", "xfs":
+		return true
+	default:
+		return false
+	}
+}
+
+// CheckSupport returns an error if this filesystem does not support encryption.
+func (m *Mount) CheckSupport() error {
+	if !m.isFscryptSetupAllowed() {
+		return &ErrEncryptionNotSupported{m}
+	}
+	return m.EncryptionSupportError(metadata.CheckSupport(m.Path))
+}
+
+func checkOwnership(path string, info os.FileInfo, trustedUser *user.User) bool {
+	if trustedUser == nil {
+		return true
+	}
+	trustedUID := uint32(util.AtoiOrPanic(trustedUser.Uid))
+	actualUID := info.Sys().(*syscall.Stat_t).Uid
+	if actualUID != 0 && actualUID != trustedUID {
+		log.Printf("WARNING: %q is owned by uid %d, but expected %d or 0",
+			path, actualUID, trustedUID)
+		return false
+	}
+	return true
+}
+
+// CheckSetup returns an error if any of the fscrypt metadata directories do not
+// exist. Will log any unexpected errors or incorrect permissions.
+func (m *Mount) CheckSetup(trustedUser *user.User) error {
+	if !m.isFscryptSetupAllowed() {
+		return &ErrNotSetup{m}
+	}
+	// Check that the mountpoint directory itself is not a symlink and has
+	// proper ownership, as otherwise we can't trust anything beneath it.
+	info, err := loggedLstat(m.Path)
+	if err != nil {
+		return &ErrNotSetup{m}
+	}
+	if (info.Mode() & os.ModeSymlink) != 0 {
+		log.Printf("mountpoint directory %q cannot be a symlink", m.Path)
+		return &ErrNotSetup{m}
+	}
+	if !info.IsDir() {
+		log.Printf("mountpoint %q is not a directory", m.Path)
+		return &ErrNotSetup{m}
+	}
+	if !checkOwnership(m.Path, info, trustedUser) {
+		return &ErrMountOwnedByAnotherUser{m}
+	}
+
+	// Check BaseDir similarly. However, unlike the other directories, we
+	// allow BaseDir to be a symlink, to support the use case of metadata
+	// for a read-only filesystem being redirected to a writable location.
+ info, err = loggedStat(m.BaseDir()) + if err != nil { + return &ErrNotSetup{m} + } + if !info.IsDir() { + log.Printf("%q is not a directory", m.BaseDir()) + return &ErrNotSetup{m} + } + if !checkOwnership(m.Path, info, trustedUser) { + return &ErrMountOwnedByAnotherUser{m} + } + + // Check that the policies and protectors directories aren't symlinks and + // have proper ownership. + subdirs := []string{m.PolicyDir(), m.ProtectorDir()} + for _, path := range subdirs { + info, err := loggedLstat(path) + if err != nil { + return &ErrNotSetup{m} + } + if (info.Mode() & os.ModeSymlink) != 0 { + log.Printf("directory %q cannot be a symlink", path) + return &ErrNotSetup{m} + } + if !info.IsDir() { + log.Printf("%q is not a directory", path) + return &ErrNotSetup{m} + } + // We are no longer too picky about the mode, given that + // 'fscrypt setup' now offers a choice of two different modes, + // and system administrators could customize it further. + // However, we can at least verify that if the directory is + // world-writable, then the sticky bit is also set. + if info.Mode()&(os.ModeSticky|0002) == 0002 { + log.Printf("%q is world-writable but doesn't have sticky bit set", path) + return &ErrInsecurePermissions{path} + } + if !checkOwnership(path, info, trustedUser) { + return &ErrSetupByAnotherUser{m} + } + } + return nil +} + +// makeDirectories creates the three metadata directories with the correct +// permissions. Note that this function overrides the umask. +func (m *Mount) makeDirectories(setupMode SetupMode) error { + // Zero the umask so we get the permissions we want + oldMask := unix.Umask(0) + defer func() { + unix.Umask(oldMask) + }() + + if err := os.Mkdir(m.BaseDir(), basePermissions); err != nil { + return err + } + + var dirMode os.FileMode + switch setupMode { + case SingleUserWritable: + dirMode = 0755 + case WorldWritable: + dirMode = os.ModeSticky | 0777 + } + if err := os.Mkdir(m.PolicyDir(), dirMode); err != nil { + return err + } + return os.Mkdir(m.ProtectorDir(), dirMode) +} + +// GetSetupMode returns the current mode for fscrypt metadata creation on this +// filesystem. +func (m *Mount) GetSetupMode() (SetupMode, *user.User, error) { + info1, err1 := os.Stat(m.PolicyDir()) + info2, err2 := os.Stat(m.ProtectorDir()) + + if err1 == nil && err2 == nil { + mask := os.ModeSticky | 0777 + mode1 := info1.Mode() & mask + mode2 := info2.Mode() & mask + uid1 := info1.Sys().(*syscall.Stat_t).Uid + uid2 := info2.Sys().(*syscall.Stat_t).Uid + user, err := util.UserFromUID(int64(uid1)) + if err == nil && mode1 == mode2 && uid1 == uid2 { + switch mode1 { + case mask: + return WorldWritable, nil, nil + case 0755: + return SingleUserWritable, user, nil + } + } + log.Printf("filesystem %s uses custom permissions on metadata directories", m.Path) + } + return -1, nil, errors.New("unable to determine setup mode") +} + +// Setup sets up the filesystem for use with fscrypt. Note that this merely +// creates the appropriate files on the filesystem. It does not actually modify +// the filesystem's feature flags. This operation is atomic; it either succeeds +// or no files in the baseDir are created. +func (m *Mount) Setup(mode SetupMode) error { + if m.CheckSetup(nil) == nil { + return &ErrAlreadySetup{m} + } + if !m.isFscryptSetupAllowed() { + return &ErrSetupNotSupported{m} + } + // We build the directories under a temp Mount and then move into place. 
+ temp, err := m.tempMount() + if err != nil { + return err + } + defer os.RemoveAll(temp.Path) + + if err = temp.makeDirectories(mode); err != nil { + return err + } + + // Atomically move directory into place. + return os.Rename(temp.BaseDir(), m.BaseDir()) +} + +// RemoveAllMetadata removes all the policy and protector metadata from the +// filesystem. This operation is atomic; it either succeeds or no files in the +// baseDir are removed. +// WARNING: Will cause data loss if the metadata is used to encrypt +// directories (this could include directories on other filesystems). +func (m *Mount) RemoveAllMetadata() error { + if err := m.CheckSetup(nil); err != nil { + return err + } + // temp will hold the old metadata temporarily + temp, err := m.tempMount() + if err != nil { + return err + } + defer os.RemoveAll(temp.Path) + + // Move directory into temp (to be destroyed on defer) + return os.Rename(m.BaseDir(), temp.BaseDir()) +} + +func syncDirectory(dirPath string) error { + dirFile, err := os.Open(dirPath) + if err != nil { + return err + } + if err = dirFile.Sync(); err != nil { + dirFile.Close() + return err + } + return dirFile.Close() +} + +func (m *Mount) overwriteDataNonAtomic(path string, data []byte) error { + file, err := os.OpenFile(path, os.O_WRONLY|os.O_TRUNC|unix.O_NOFOLLOW, 0) + if err != nil { + return err + } + if _, err = file.Write(data); err != nil { + log.Printf("WARNING: overwrite of %q failed; file will be corrupted!", path) + file.Close() + return err + } + if err = file.Sync(); err != nil { + file.Close() + return err + } + if err = file.Close(); err != nil { + return err + } + log.Printf("successfully overwrote %q non-atomically", path) + return nil +} + +// writeData writes the given data to the given path such that, if possible, the +// data is either written to stable storage or an error is returned. If a file +// already exists at the path, it will be replaced. +// +// However, if the process doesn't have write permission to the directory but +// does have write permission to the file itself, then as a fallback the file is +// overwritten in-place rather than replaced. Note that this may be non-atomic. +func (m *Mount) writeData(path string, data []byte, owner *user.User, mode os.FileMode) error { + // Write the data to a temporary file, sync it, then rename into place + // so that the operation will be atomic. + dirPath := filepath.Dir(path) + tempFile, err := ioutil.TempFile(dirPath, tempPrefix) + if err != nil { + log.Print(err) + if os.IsPermission(err) { + if _, err = os.Lstat(path); err == nil { + log.Printf("trying non-atomic overwrite of %q", path) + return m.overwriteDataNonAtomic(path, data) + } + return &ErrNoCreatePermission{m} + } + return err + } + defer os.Remove(tempFile.Name()) + + // Ensure the new file has the right permissions mask. + if err = tempFile.Chmod(mode); err != nil { + tempFile.Close() + return err + } + // Override the file owner if one was specified. This happens when root + // needs to create files owned by a particular user. 
+ if owner != nil { + if err = util.Chown(tempFile, owner); err != nil { + log.Printf("could not set owner of %q to %v: %v", + path, owner.Username, err) + tempFile.Close() + return err + } + } + if _, err = tempFile.Write(data); err != nil { + tempFile.Close() + return err + } + if err = tempFile.Sync(); err != nil { + tempFile.Close() + return err + } + if err = tempFile.Close(); err != nil { + return err + } + + if err = os.Rename(tempFile.Name(), path); err != nil { + return err + } + // Ensure the rename has been persisted before returning success. + return syncDirectory(dirPath) +} + +// addMetadata writes the metadata structure to the file with the specified +// path. This will overwrite any existing data. The operation is atomic. +func (m *Mount) addMetadata(path string, md metadata.Metadata, owner *user.User) error { + if err := md.CheckValidity(); err != nil { + return errors.Wrap(err, "provided metadata is invalid") + } + + data, err := proto.Marshal(md) + if err != nil { + return err + } + + mode := filePermissions + // If the file already exists, then preserve its owner and mode if + // possible. This is necessary because by default, for atomicity + // reasons we'll replace the file rather than overwrite it. + info, err := os.Lstat(path) + if err == nil { + if owner == nil && util.IsUserRoot() { + uid := info.Sys().(*syscall.Stat_t).Uid + if owner, err = util.UserFromUID(int64(uid)); err != nil { + log.Print(err) + } + } + mode = info.Mode() & 0777 + } else if !os.IsNotExist(err) { + log.Print(err) + } + + if owner != nil { + log.Printf("writing metadata to %q and setting owner to %s", path, owner.Username) + } else { + log.Printf("writing metadata to %q", path) + } + return m.writeData(path, data, owner, mode) +} + +// readMetadataFileSafe gets the contents of a metadata file extra-carefully, +// considering that it could be a malicious file created to cause a +// denial-of-service. Specifically, the following checks are done: +// +// - It must be a regular file, not another type of file like a symlink or FIFO. +// (Symlinks aren't bad by themselves, but given that a malicious user could +// point one to absolutely anywhere, and there is no known use case for the +// metadata files themselves being symlinks, it seems best to disallow them.) +// - It must have a reasonable size (<= maxMetadataFileSize). +// - If trustedUser is non-nil, then the file must be owned by the given user +// or by root. +// +// Take care to avoid TOCTOU (time-of-check-time-of-use) bugs when doing these +// tests. Notably, we must open the file before checking the file type, as the +// file type could change between any previous checks and the open. When doing +// this, O_NOFOLLOW is needed to avoid following a symlink (this applies to the +// last path component only), and O_NONBLOCK is needed to avoid blocking if the +// file is a FIFO. +// +// This function returns the data read as well as the UID of the user who owns +// the file. The returned UID is needed for login protectors, where the UID +// needs to be cross-checked with the UID stored in the file itself. 
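+//
+// Concretely, the function body below opens the file as
+//
+//	os.OpenFile(path, os.O_RDONLY|unix.O_NOFOLLOW|unix.O_NONBLOCK, 0)
+//
+// so a symlink at the final path component is rejected and a FIFO cannot
+// cause the open to block.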
+func readMetadataFileSafe(path string, trustedUser *user.User) ([]byte, int64, error) { + file, err := os.OpenFile(path, os.O_RDONLY|unix.O_NOFOLLOW|unix.O_NONBLOCK, 0) + if err != nil { + return nil, -1, err + } + defer file.Close() + + info, err := file.Stat() + if err != nil { + return nil, -1, err + } + if !info.Mode().IsRegular() { + return nil, -1, &ErrCorruptMetadata{path, errors.New("not a regular file")} + } + if !checkOwnership(path, info, trustedUser) { + return nil, -1, &ErrCorruptMetadata{path, errors.New("metadata file belongs to another user")} + } + // Clear O_NONBLOCK, since it has served its purpose when opening the + // file, and the behavior of reading from a regular file with O_NONBLOCK + // is technically unspecified. + if _, err = unix.FcntlInt(file.Fd(), unix.F_SETFL, 0); err != nil { + return nil, -1, &os.PathError{Op: "clearing O_NONBLOCK", Path: path, Err: err} + } + // Read the file contents, allowing at most maxMetadataFileSize bytes. + reader := &io.LimitedReader{R: file, N: maxMetadataFileSize + 1} + data, err := ioutil.ReadAll(reader) + if err != nil { + return nil, -1, err + } + if reader.N == 0 { + return nil, -1, &ErrCorruptMetadata{path, errors.New("metadata file size limit exceeded")} + } + return data, int64(info.Sys().(*syscall.Stat_t).Uid), nil +} + +// getMetadata reads the metadata structure from the file with the specified +// path. Only reads normal metadata files, not linked metadata. +func (m *Mount) getMetadata(path string, trustedUser *user.User, md metadata.Metadata) (int64, error) { + data, owner, err := readMetadataFileSafe(path, trustedUser) + if err != nil { + log.Printf("could not read metadata from %q: %v", path, err) + return -1, err + } + + if err := proto.Unmarshal(data, md); err != nil { + return -1, &ErrCorruptMetadata{path, err} + } + + if err := md.CheckValidity(); err != nil { + return -1, &ErrCorruptMetadata{path, err} + } + + log.Printf("successfully read metadata from %q", path) + return owner, nil +} + +// removeMetadata deletes the metadata struct from the file with the specified +// path. Works with regular or linked metadata. +func (m *Mount) removeMetadata(path string) error { + if err := os.Remove(path); err != nil { + log.Printf("could not remove metadata file at %q: %v", path, err) + return err + } + + log.Printf("successfully removed metadata file at %q", path) + return nil +} + +// AddProtector adds the protector metadata to this filesystem's storage. This +// will overwrite the value of an existing protector with this descriptor. This +// will fail with ErrLinkedProtector if a linked protector with this descriptor +// already exists on the filesystem. +func (m *Mount) AddProtector(data *metadata.ProtectorData, owner *user.User) error { + var err error + if err = m.CheckSetup(nil); err != nil { + return err + } + if isRegularFile(m.linkedProtectorPath(data.ProtectorDescriptor)) { + return errors.Errorf("cannot modify linked protector %s on filesystem %s", + data.ProtectorDescriptor, m.Path) + } + path := m.protectorPath(data.ProtectorDescriptor) + return m.addMetadata(path, data, owner) +} + +// AddLinkedProtector adds a link in this filesystem to the protector metadata +// in the dest filesystem, if one doesn't already exist. On success, the return +// value is a nil error and a bool that is true iff the link is newly created. 
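+//
+// Illustrative call (the variable names here are assumptions, not part of
+// the API):
+//
+//	isNewLink, err := rootMnt.AddLinkedProtector(descriptor, homeMnt, nil, nil)
+//
+// On success a small link file is written on rootMnt recording how to find
+// homeMnt (see makeLink), so the protector data itself is stored only once.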
+func (m *Mount) AddLinkedProtector(descriptor string, dest *Mount, trustedUser *user.User, + ownerIfCreating *user.User) (bool, error) { + if err := m.CheckSetup(trustedUser); err != nil { + return false, err + } + // Check that the link is good (descriptor exists, filesystem has UUID). + if _, err := dest.GetRegularProtector(descriptor, trustedUser); err != nil { + return false, err + } + + linkPath := m.linkedProtectorPath(descriptor) + + // Check whether the link already exists. + existingLink, _, err := readMetadataFileSafe(linkPath, trustedUser) + if err == nil { + existingLinkedMnt, err := getMountFromLink(string(existingLink)) + if err != nil { + return false, errors.Wrap(err, linkPath) + } + if existingLinkedMnt != dest { + return false, errors.Errorf("link %q points to %q, but expected %q", + linkPath, existingLinkedMnt.Path, dest.Path) + } + return false, nil + } + if !os.IsNotExist(err) { + return false, err + } + + var newLink string + newLink, err = makeLink(dest) + if err != nil { + return false, err + } + return true, m.writeData(linkPath, []byte(newLink), ownerIfCreating, filePermissions) +} + +// GetRegularProtector looks up the protector metadata by descriptor. This will +// fail with ErrProtectorNotFound if the descriptor is a linked protector. +func (m *Mount) GetRegularProtector(descriptor string, trustedUser *user.User) (*metadata.ProtectorData, error) { + if err := m.CheckSetup(trustedUser); err != nil { + return nil, err + } + data := new(metadata.ProtectorData) + path := m.protectorPath(descriptor) + owner, err := m.getMetadata(path, trustedUser, data) + if os.IsNotExist(err) { + err = &ErrProtectorNotFound{descriptor, m} + } + if err != nil { + return nil, err + } + // Login protectors have their UID stored in the file. Since normally + // any user can create files in the fscrypt metadata directories, for a + // login protector to be considered valid it *must* be owned by the + // claimed user or by root. Note: fscrypt v0.3.2 and later always makes + // login protectors owned by the user, but previous versions could + // create them owned by root -- that is the main reason we allow root. + if data.Source == metadata.SourceType_pam_passphrase && owner != 0 && owner != data.Uid { + log.Printf("WARNING: %q claims to be the login protector for uid %d, but it is owned by uid %d. Needs to be %d or 0.", + path, data.Uid, owner, data.Uid) + return nil, &ErrCorruptMetadata{path, errors.New("login protector belongs to wrong user")} + } + return data, nil +} + +// GetProtector returns the Mount of the filesystem containing the information +// and that protector's data. If the descriptor is a regular (not linked) +// protector, the mount will return itself. +func (m *Mount) GetProtector(descriptor string, trustedUser *user.User) (*Mount, *metadata.ProtectorData, error) { + if err := m.CheckSetup(trustedUser); err != nil { + return nil, nil, err + } + // Get the link data from the link file + path := m.linkedProtectorPath(descriptor) + link, _, err := readMetadataFileSafe(path, trustedUser) + if err != nil { + // If the link doesn't exist, try for a regular protector. 
+ if os.IsNotExist(err) { + data, err := m.GetRegularProtector(descriptor, trustedUser) + return m, data, err + } + return nil, nil, err + } + log.Printf("following protector link %s", path) + linkedMnt, err := getMountFromLink(string(link)) + if err != nil { + return nil, nil, errors.Wrap(err, path) + } + data, err := linkedMnt.GetRegularProtector(descriptor, trustedUser) + if err != nil { + return nil, nil, &ErrFollowLink{string(link), err} + } + return linkedMnt, data, nil +} + +// RemoveProtector deletes the protector metadata (or a link to another +// filesystem's metadata) from the filesystem storage. +func (m *Mount) RemoveProtector(descriptor string) error { + if err := m.CheckSetup(nil); err != nil { + return err + } + // We first try to remove the linkedProtector. If that metadata does not + // exist, we try to remove the normal protector. + err := m.removeMetadata(m.linkedProtectorPath(descriptor)) + if os.IsNotExist(err) { + err = m.removeMetadata(m.protectorPath(descriptor)) + if os.IsNotExist(err) { + err = &ErrProtectorNotFound{descriptor, m} + } + } + return err +} + +// ListProtectors lists the descriptors of all protectors on this filesystem. +// This does not include linked protectors. If trustedUser is non-nil, then +// the protectors are restricted to those owned by the given user or by root. +func (m *Mount) ListProtectors(trustedUser *user.User) ([]string, error) { + return m.listMetadata(m.ProtectorDir(), "protectors", trustedUser) +} + +// AddPolicy adds the policy metadata to the filesystem storage. +func (m *Mount) AddPolicy(data *metadata.PolicyData, owner *user.User) error { + if err := m.CheckSetup(nil); err != nil { + return err + } + + return m.addMetadata(m.PolicyPath(data.KeyDescriptor), data, owner) +} + +// GetPolicy looks up the policy metadata by descriptor. +func (m *Mount) GetPolicy(descriptor string, trustedUser *user.User) (*metadata.PolicyData, error) { + if err := m.CheckSetup(trustedUser); err != nil { + return nil, err + } + data := new(metadata.PolicyData) + _, err := m.getMetadata(m.PolicyPath(descriptor), trustedUser, data) + if os.IsNotExist(err) { + err = &ErrPolicyNotFound{descriptor, m} + } + return data, err +} + +// RemovePolicy deletes the policy metadata from the filesystem storage. +func (m *Mount) RemovePolicy(descriptor string) error { + if err := m.CheckSetup(nil); err != nil { + return err + } + err := m.removeMetadata(m.PolicyPath(descriptor)) + if os.IsNotExist(err) { + err = &ErrPolicyNotFound{descriptor, m} + } + return err +} + +// ListPolicies lists the descriptors of all policies on this filesystem. If +// trustedUser is non-nil, then the policies are restricted to those owned by +// the given user or by root. 
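+//
+// Illustrative calls (mnt and usr are assumed to be a set-up Mount and a
+// *user.User):
+//
+//	all, err := mnt.ListPolicies(nil)   // every policy on the filesystem
+//	mine, err := mnt.ListPolicies(usr)  // only usr's policies (and root's)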
+func (m *Mount) ListPolicies(trustedUser *user.User) ([]string, error) { + return m.listMetadata(m.PolicyDir(), "policies", trustedUser) +} + +type namesAndTimes struct { + names []string + times []time.Time +} + +func (c namesAndTimes) Len() int { + return len(c.names) +} + +func (c namesAndTimes) Less(i, j int) bool { + return c.times[i].Before(c.times[j]) +} + +func (c namesAndTimes) Swap(i, j int) { + c.names[i], c.names[j] = c.names[j], c.names[i] + c.times[i], c.times[j] = c.times[j], c.times[i] +} + +func sortFileListByLastMtime(directoryPath string, names []string) error { + c := namesAndTimes{names: names, times: make([]time.Time, len(names))} + for i, name := range names { + fi, err := os.Lstat(filepath.Join(directoryPath, name)) + if err != nil { + return err + } + c.times[i] = fi.ModTime() + } + sort.Sort(c) + return nil +} + +// listDirectory returns a list of descriptors for a metadata directory, +// including files which are links to other filesystem's metadata. +func (m *Mount) listDirectory(directoryPath string) ([]string, error) { + dir, err := os.Open(directoryPath) + if err != nil { + return nil, err + } + defer dir.Close() + + names, err := dir.Readdirnames(-1) + if err != nil { + return nil, err + } + + if SortDescriptorsByLastMtime { + if err := sortFileListByLastMtime(directoryPath, names); err != nil { + return nil, err + } + } + + descriptors := make([]string, 0, len(names)) + for _, name := range names { + // Be sure to include links as well + descriptors = append(descriptors, strings.TrimSuffix(name, linkFileExtension)) + } + return descriptors, nil +} + +func (m *Mount) listMetadata(dirPath string, metadataType string, owner *user.User) ([]string, error) { + log.Printf("listing %s in %q", metadataType, dirPath) + if err := m.CheckSetup(owner); err != nil { + return nil, err + } + names, err := m.listDirectory(dirPath) + if err != nil { + return nil, err + } + filesIgnoredDescription := "" + if owner != nil { + filteredNames := make([]string, 0, len(names)) + uid := uint32(util.AtoiOrPanic(owner.Uid)) + for _, name := range names { + info, err := os.Lstat(filepath.Join(dirPath, name)) + if err != nil { + continue + } + fileUID := info.Sys().(*syscall.Stat_t).Uid + if fileUID != uid && fileUID != 0 { + continue + } + filteredNames = append(filteredNames, name) + } + numIgnored := len(names) - len(filteredNames) + if numIgnored != 0 { + filesIgnoredDescription = + fmt.Sprintf(" (ignored %d %s not owned by %s or root)", + numIgnored, metadataType, owner.Username) + } + names = filteredNames + } + log.Printf("found %d %s%s", len(names), metadataType, filesIgnoredDescription) + return names, nil +} diff --git a/vendor/github.com/google/fscrypt/filesystem/mountpoint.go b/vendor/github.com/google/fscrypt/filesystem/mountpoint.go new file mode 100644 index 00000000000..0b0693b2b6b --- /dev/null +++ b/vendor/github.com/google/fscrypt/filesystem/mountpoint.go @@ -0,0 +1,578 @@ +/* + * mountpoint.go - Contains all the functionality for finding mountpoints and + * using UUIDs to refer to them. Specifically, we can find the mountpoint of a + * path, get info about a mountpoint, and find mountpoints with a specific UUID. + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package filesystem + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "sync" + + "github.com/pkg/errors" +) + +var ( + // These maps hold data about the state of the system's filesystems. + // + // They only contain one Mount per filesystem, even if there are + // additional bind mounts, since we want to store fscrypt metadata in + // only one place per filesystem. When it is ambiguous which Mount + // should be used for a filesystem, mountsByDevice will contain an + // explicit nil entry, and mountsByPath won't contain an entry. + mountsByDevice map[DeviceNumber]*Mount + mountsByPath map[string]*Mount + // Used to make the mount functions thread safe + mountMutex sync.Mutex + // True if the maps have been successfully initialized. + mountsInitialized bool + // Supported tokens for filesystem links + uuidToken = "UUID" + pathToken = "PATH" + // Location to perform UUID lookup + uuidDirectory = "/dev/disk/by-uuid" +) + +// Unescape octal-encoded escape sequences in a string from the mountinfo file. +// The kernel encodes the ' ', '\t', '\n', and '\\' bytes this way. This +// function exactly inverts what the kernel does, including by preserving +// invalid UTF-8. +func unescapeString(str string) string { + var sb strings.Builder + for i := 0; i < len(str); i++ { + b := str[i] + if b == '\\' && i+3 < len(str) { + if parsed, err := strconv.ParseInt(str[i+1:i+4], 8, 8); err == nil { + b = uint8(parsed) + i += 3 + } + } + sb.WriteByte(b) + } + return sb.String() +} + +// EscapeString is the reverse of unescapeString. Use this to avoid injecting +// spaces or newlines into output that uses these characters as separators. +func EscapeString(str string) string { + var sb strings.Builder + for _, b := range []byte(str) { + switch b { + case ' ', '\t', '\n', '\\': + sb.WriteString(fmt.Sprintf("\\%03o", b)) + default: + sb.WriteByte(b) + } + } + return sb.String() +} + +// We get the device name via the device number rather than use the mount source +// field directly. This is necessary to handle a rootfs that was mounted via +// the kernel command line, since mountinfo always shows /dev/root for that. +// This assumes that the device nodes are in the standard location. +func getDeviceName(num DeviceNumber) string { + linkPath := fmt.Sprintf("/sys/dev/block/%v", num) + if target, err := os.Readlink(linkPath); err == nil { + return fmt.Sprintf("/dev/%s", filepath.Base(target)) + } + return "" +} + +// Parse one line of /proc/self/mountinfo. +// +// The line contains the following space-separated fields: +// [0] mount ID +// [1] parent ID +// [2] major:minor +// [3] root +// [4] mount point +// [5] mount options +// [6...n-1] optional field(s) +// [n] separator +// [n+1] filesystem type +// [n+2] mount source +// [n+3] super options +// +// For more details, see https://www.kernel.org/doc/Documentation/filesystems/proc.txt +func parseMountInfoLine(line string) *Mount { + fields := strings.Split(line, " ") + if len(fields) < 10 { + return nil + } + + // Count the optional fields. 
In case new fields are appended later, + // don't simply assume that n == len(fields) - 4. + n := 6 + for fields[n] != "-" { + n++ + if n >= len(fields) { + return nil + } + } + if n+3 >= len(fields) { + return nil + } + + var mnt *Mount = &Mount{} + var err error + mnt.DeviceNumber, err = newDeviceNumberFromString(fields[2]) + if err != nil { + return nil + } + mnt.Subtree = unescapeString(fields[3]) + mnt.Path = unescapeString(fields[4]) + for _, opt := range strings.Split(fields[5], ",") { + if opt == "ro" { + mnt.ReadOnly = true + } + } + mnt.FilesystemType = unescapeString(fields[n+1]) + mnt.Device = getDeviceName(mnt.DeviceNumber) + return mnt +} + +type mountpointTreeNode struct { + mount *Mount + parent *mountpointTreeNode + children []*mountpointTreeNode +} + +func addUncontainedSubtreesRecursive(dst map[string]bool, + node *mountpointTreeNode, allUncontainedSubtrees map[string]bool) { + if allUncontainedSubtrees[node.mount.Subtree] { + dst[node.mount.Subtree] = true + } + for _, child := range node.children { + addUncontainedSubtreesRecursive(dst, child, allUncontainedSubtrees) + } +} + +// findMainMount finds the "main" Mount of a filesystem. The "main" Mount is +// where the filesystem's fscrypt metadata is stored. +// +// Normally, there is just one Mount and it's of the entire filesystem +// (mnt.Subtree == "/"). But in general, the filesystem might be mounted in +// multiple places, including "bind mounts" where mnt.Subtree != "/". Also, the +// filesystem might have a combination of read-write and read-only mounts. +// +// To handle most cases, we could just choose a mount with mnt.Subtree == "/", +// preferably a read-write mount. However, that doesn't work in containers +// where the "/" subtree might not be mounted. Here's a real-world example: +// +// mnt.Subtree mnt.Path +// ----------- -------- +// /var/lib/lxc/base/rootfs / +// /var/cache/pacman/pkg /var/cache/pacman/pkg +// /srv/repo/x86_64 /srv/http/x86_64 +// +// In this case, all mnt.Subtree are independent. To handle this case, we must +// choose the Mount whose mnt.Path contains the others, i.e. the first one. +// Note: the fscrypt metadata won't be usable from outside the container since +// it won't be at the real root of the filesystem, but that may be acceptable. +// +// However, we can't look *only* at mnt.Path, since in some cases mnt.Subtree is +// needed to correctly handle bind mounts. For example, in the following case, +// the first Mount should be chosen: +// +// mnt.Subtree mnt.Path +// ----------- -------- +// /foo /foo +// /foo/dir /dir +// +// To solve this, we divide the mounts into non-overlapping trees of mnt.Path. +// Then, we choose one of these trees which contains (exactly or via path +// prefix) *all* mnt.Subtree. We then return the root of this tree. In both +// the above examples, this algorithm returns the first Mount. +func findMainMount(filesystemMounts []*Mount) *Mount { + // Index this filesystem's mounts by path. Note: paths are unique here, + // since non-last mounts were already excluded earlier. + // + // Also build the set of all mounted subtrees. + filesystemMountsByPath := make(map[string]*mountpointTreeNode) + allSubtrees := make(map[string]bool) + for _, mnt := range filesystemMounts { + filesystemMountsByPath[mnt.Path] = &mountpointTreeNode{mount: mnt} + allSubtrees[mnt.Subtree] = true + } + + // Divide the mounts into non-overlapping trees of mountpoints. 
+ for path, mntNode := range filesystemMountsByPath { + for path != "/" && mntNode.parent == nil { + path = filepath.Dir(path) + if parent := filesystemMountsByPath[path]; parent != nil { + mntNode.parent = parent + parent.children = append(parent.children, mntNode) + } + } + } + + // Build the set of mounted subtrees that aren't contained in any other + // mounted subtree. + allUncontainedSubtrees := make(map[string]bool) + for subtree := range allSubtrees { + contained := false + for t := subtree; t != "/" && !contained; { + t = filepath.Dir(t) + contained = allSubtrees[t] + } + if !contained { + allUncontainedSubtrees[subtree] = true + } + } + + // Select the root of a mountpoint tree whose mounted subtrees contain + // *all* mounted subtrees. Equivalently, select a mountpoint tree in + // which every uncontained subtree is mounted. + var mainMount *Mount + for _, mntNode := range filesystemMountsByPath { + mnt := mntNode.mount + if mntNode.parent != nil { + continue + } + uncontainedSubtrees := make(map[string]bool) + addUncontainedSubtreesRecursive(uncontainedSubtrees, mntNode, allUncontainedSubtrees) + if len(uncontainedSubtrees) != len(allUncontainedSubtrees) { + continue + } + // If there's more than one eligible mount, they should have the + // same Subtree. Otherwise it's ambiguous which one to use. + if mainMount != nil && mainMount.Subtree != mnt.Subtree { + log.Printf("Unsupported case: %q (%v) has multiple non-overlapping mounts. This filesystem will be ignored!", + mnt.Device, mnt.DeviceNumber) + return nil + } + // Prefer a read-write mount to a read-only one. + if mainMount == nil || mainMount.ReadOnly { + mainMount = mnt + } + } + return mainMount +} + +// This is separate from loadMountInfo() only for unit testing. +func readMountInfo(r io.Reader) error { + mountsByDevice = make(map[DeviceNumber]*Mount) + mountsByPath = make(map[string]*Mount) + allMountsByDevice := make(map[DeviceNumber][]*Mount) + allMountsByPath := make(map[string]*Mount) + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + line := scanner.Text() + mnt := parseMountInfoLine(line) + if mnt == nil { + log.Printf("ignoring invalid mountinfo line %q", line) + continue + } + + // We can only use mountpoints that are directories for fscrypt. + if !isDir(mnt.Path) { + log.Printf("ignoring mountpoint %q because it is not a directory", mnt.Path) + continue + } + + // Note this overrides the info if we have seen the mountpoint + // earlier in the file. This is correct behavior because the + // mountpoints are listed in mount order. + allMountsByPath[mnt.Path] = mnt + } + // For each filesystem, choose a "main" Mount and discard any additional + // bind mounts. fscrypt only cares about the main Mount, since it's + // where the fscrypt metadata is stored. Store all the main Mounts in + // mountsByDevice and mountsByPath so that they can be found later. + for _, mnt := range allMountsByPath { + allMountsByDevice[mnt.DeviceNumber] = + append(allMountsByDevice[mnt.DeviceNumber], mnt) + } + for deviceNumber, filesystemMounts := range allMountsByDevice { + mnt := findMainMount(filesystemMounts) + mountsByDevice[deviceNumber] = mnt // may store an explicit nil entry + if mnt != nil { + mountsByPath[mnt.Path] = mnt + } + } + return nil +} + +// loadMountInfo populates the Mount mappings by parsing /proc/self/mountinfo. +// It returns an error if the Mount mappings cannot be populated. 
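+//
+// For reference, a mountinfo line has the following shape (values are
+// illustrative only):
+//
+//	36 25 8:3 / /mnt rw,relatime shared:7 - ext4 /dev/sda3 rw
+//
+// which parseMountInfoLine reads as device number 8:3, subtree "/" and
+// mountpoint "/mnt" (its fields [2], [3] and [4]).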
+func loadMountInfo() error { + if !mountsInitialized { + file, err := os.Open("/proc/self/mountinfo") + if err != nil { + return err + } + defer file.Close() + if err := readMountInfo(file); err != nil { + return err + } + mountsInitialized = true + } + return nil +} + +func filesystemLacksMainMountError(deviceNumber DeviceNumber) error { + return errors.Errorf("Device %q (%v) lacks a \"main\" mountpoint in the current mount namespace, so it's ambiguous where to store the fscrypt metadata.", + getDeviceName(deviceNumber), deviceNumber) +} + +// AllFilesystems lists all mounted filesystems ordered by path to their "main" +// Mount. Use CheckSetup() to see if they are set up for use with fscrypt. +func AllFilesystems() ([]*Mount, error) { + mountMutex.Lock() + defer mountMutex.Unlock() + if err := loadMountInfo(); err != nil { + return nil, err + } + + mounts := make([]*Mount, 0, len(mountsByPath)) + for _, mount := range mountsByPath { + mounts = append(mounts, mount) + } + + sort.Sort(PathSorter(mounts)) + return mounts, nil +} + +// UpdateMountInfo updates the filesystem mountpoint maps with the current state +// of the filesystem mountpoints. Returns error if the initialization fails. +func UpdateMountInfo() error { + mountMutex.Lock() + defer mountMutex.Unlock() + mountsInitialized = false + return loadMountInfo() +} + +// FindMount returns the main Mount object for the filesystem which contains the +// file at the specified path. An error is returned if the path is invalid or if +// we cannot load the required mount data. If a mount has been updated since the +// last call to one of the mount functions, run UpdateMountInfo to see changes. +func FindMount(path string) (*Mount, error) { + mountMutex.Lock() + defer mountMutex.Unlock() + if err := loadMountInfo(); err != nil { + return nil, err + } + // First try to find the mount by the number of the containing device. + deviceNumber, err := getNumberOfContainingDevice(path) + if err != nil { + return nil, err + } + mnt, ok := mountsByDevice[deviceNumber] + if ok { + if mnt == nil { + return nil, filesystemLacksMainMountError(deviceNumber) + } + return mnt, nil + } + // The mount couldn't be found by the number of the containing device. + // Fall back to walking up the directory hierarchy and checking for a + // mount at each directory path. This is necessary for btrfs, where + // files report a different st_dev from the /proc/self/mountinfo entry. + curPath, err := canonicalizePath(path) + if err != nil { + return nil, err + } + for { + mnt := mountsByPath[curPath] + if mnt != nil { + return mnt, nil + } + // Move to the parent directory unless we have reached the root. + parent := filepath.Dir(curPath) + if parent == curPath { + return nil, errors.Errorf("couldn't find mountpoint containing %q", path) + } + curPath = parent + } +} + +// GetMount is like FindMount, except GetMount also returns an error if the path +// doesn't name the same file as the filesystem's "main" Mount. For example, if +// a filesystem is fully mounted at "/mnt" and if "/mnt/a" exists, then +// FindMount("/mnt/a") will succeed whereas GetMount("/mnt/a") will fail. This +// is true even if "/mnt/a" is a bind mount of part of the same filesystem. +func GetMount(mountpoint string) (*Mount, error) { + mnt, err := FindMount(mountpoint) + if err != nil { + return nil, &ErrNotAMountpoint{mountpoint} + } + // Check whether 'mountpoint' names the same directory as 'mnt.Path'. 
+ // Use os.SameFile() (i.e., compare inode numbers) rather than compare + // canonical paths, since filesystems may be mounted in multiple places. + fi1, err := os.Stat(mountpoint) + if err != nil { + return nil, err + } + fi2, err := os.Stat(mnt.Path) + if err != nil { + return nil, err + } + if !os.SameFile(fi1, fi2) { + return nil, &ErrNotAMountpoint{mountpoint} + } + return mnt, nil +} + +func uuidToDeviceNumber(uuid string) (DeviceNumber, error) { + uuidSymlinkPath := filepath.Join(uuidDirectory, uuid) + return getDeviceNumber(uuidSymlinkPath) +} + +func deviceNumberToMount(deviceNumber DeviceNumber) (*Mount, bool) { + mountMutex.Lock() + defer mountMutex.Unlock() + if err := loadMountInfo(); err != nil { + log.Print(err) + return nil, false + } + mnt, ok := mountsByDevice[deviceNumber] + return mnt, ok +} + +// getMountFromLink returns the main Mount, if any, for the filesystem which the +// given link points to. The link should contain a series of token-value pairs +// (=), one per line. The supported tokens are "UUID" and "PATH". +// If the UUID is present and it works, then it is used; otherwise, PATH is used +// if it is present. (The fallback from UUID to PATH will keep the link working +// if the UUID of the target filesystem changes but its mountpoint doesn't.) +// +// If a mount has been updated since the last call to one of the mount +// functions, make sure to run UpdateMountInfo first. +func getMountFromLink(link string) (*Mount, error) { + // Parse the link. + uuid := "" + path := "" + lines := strings.Split(link, "\n") + for _, line := range lines { + line := strings.TrimSpace(line) + if line == "" { + continue + } + pair := strings.Split(line, "=") + if len(pair) != 2 { + log.Printf("ignoring invalid line in filesystem link file: %q", line) + continue + } + token := pair[0] + value := pair[1] + switch token { + case uuidToken: + uuid = value + case pathToken: + path = value + default: + log.Printf("ignoring unknown link token %q", token) + } + } + // At least one of UUID and PATH must be present. + if uuid == "" && path == "" { + return nil, &ErrFollowLink{link, errors.Errorf("invalid filesystem link file")} + } + + // Try following the UUID. + errMsg := "" + if uuid != "" { + deviceNumber, err := uuidToDeviceNumber(uuid) + if err == nil { + mnt, ok := deviceNumberToMount(deviceNumber) + if mnt != nil { + log.Printf("resolved filesystem link using UUID %q", uuid) + return mnt, nil + } + if ok { + return nil, &ErrFollowLink{link, filesystemLacksMainMountError(deviceNumber)} + } + log.Printf("cannot find filesystem with UUID %q", uuid) + } else { + log.Printf("cannot find filesystem with UUID %q: %v", uuid, err) + } + errMsg += fmt.Sprintf("cannot find filesystem with UUID %q", uuid) + if path != "" { + log.Printf("falling back to using mountpoint path instead of UUID") + } + } + // UUID didn't work. As a fallback, try the mountpoint path. + if path != "" { + mnt, err := GetMount(path) + if mnt != nil { + log.Printf("resolved filesystem link using mountpoint path %q", path) + return mnt, nil + } + log.Print(err) + if errMsg == "" { + errMsg = fmt.Sprintf("cannot find filesystem with main mountpoint %q", path) + } else { + errMsg += fmt.Sprintf(" or main mountpoint %q", path) + } + } + // No method worked; return an error. 
+ return nil, &ErrFollowLink{link, errors.New(errMsg)} +} + +func (mnt *Mount) getFilesystemUUID() (string, error) { + dirContents, err := ioutil.ReadDir(uuidDirectory) + if err != nil { + return "", err + } + for _, fileInfo := range dirContents { + if fileInfo.Mode()&os.ModeSymlink == 0 { + continue // Only interested in UUID symlinks + } + uuid := fileInfo.Name() + deviceNumber, err := uuidToDeviceNumber(uuid) + if err != nil { + log.Print(err) + continue + } + if mnt.DeviceNumber == deviceNumber { + return uuid, nil + } + } + return "", errors.Errorf("cannot determine UUID of device %q (%v)", + mnt.Device, mnt.DeviceNumber) +} + +// makeLink creates the contents of a link file which will point to the given +// filesystem. This will normally be a string of the form +// "UUID=\nPATH=\n". If the UUID cannot be determined, the UUID +// portion will be omitted. +func makeLink(mnt *Mount) (string, error) { + uuid, err := mnt.getFilesystemUUID() + if err != nil { + // The UUID could not be determined. This happens for btrfs + // filesystems, as the device number found via + // /dev/disk/by-uuid/* for btrfs filesystems differs from the + // actual device number of the mounted filesystem. Just rely + // entirely on the fallback to mountpoint path. + log.Print(err) + return fmt.Sprintf("%s=%s\n", pathToken, mnt.Path), nil + } + return fmt.Sprintf("%s=%s\n%s=%s\n", uuidToken, uuid, pathToken, mnt.Path), nil +} diff --git a/vendor/github.com/google/fscrypt/filesystem/path.go b/vendor/github.com/google/fscrypt/filesystem/path.go new file mode 100644 index 00000000000..8cfb23574fe --- /dev/null +++ b/vendor/github.com/google/fscrypt/filesystem/path.go @@ -0,0 +1,128 @@ +/* + * path.go - Utility functions for dealing with filesystem paths + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package filesystem + +import ( + "fmt" + "log" + "os" + "path/filepath" + + "golang.org/x/sys/unix" + + "github.com/pkg/errors" +) + +// OpenFileOverridingUmask calls os.OpenFile but with the umask overridden so +// that no permission bits are masked out if the file is created. +func OpenFileOverridingUmask(name string, flag int, perm os.FileMode) (*os.File, error) { + oldMask := unix.Umask(0) + defer unix.Umask(oldMask) + return os.OpenFile(name, flag, perm) +} + +// canonicalizePath turns path into an absolute path without symlinks. +func canonicalizePath(path string) (string, error) { + path, err := filepath.Abs(path) + if err != nil { + return "", err + } + path, err = filepath.EvalSymlinks(path) + + // Get a better error if we have an invalid path + if pathErr, ok := err.(*os.PathError); ok { + err = errors.Wrap(pathErr.Err, pathErr.Path) + } + + return path, err +} + +// loggedStat runs os.Stat, but it logs the error if stat returns any error +// other than nil or IsNotExist. 
+func loggedStat(name string) (os.FileInfo, error) { + info, err := os.Stat(name) + if err != nil && !os.IsNotExist(err) { + log.Print(err) + } + return info, err +} + +// loggedLstat runs os.Lstat (doesn't dereference trailing symlink), but it logs +// the error if lstat returns any error other than nil or IsNotExist. +func loggedLstat(name string) (os.FileInfo, error) { + info, err := os.Lstat(name) + if err != nil && !os.IsNotExist(err) { + log.Print(err) + } + return info, err +} + +// isDir returns true if the path exists and is that of a directory. +func isDir(path string) bool { + info, err := loggedStat(path) + return err == nil && info.IsDir() +} + +// isRegularFile returns true if the path exists and is that of a regular file. +func isRegularFile(path string) bool { + info, err := loggedStat(path) + return err == nil && info.Mode().IsRegular() +} + +// HaveReadAccessTo returns true if the process has read access to a file or +// directory, without actually opening it. +func HaveReadAccessTo(path string) bool { + return unix.Access(path, unix.R_OK) == nil +} + +// DeviceNumber represents a combined major:minor device number. +type DeviceNumber uint64 + +func (num DeviceNumber) String() string { + return fmt.Sprintf("%d:%d", unix.Major(uint64(num)), unix.Minor(uint64(num))) +} + +func newDeviceNumberFromString(str string) (DeviceNumber, error) { + var major, minor uint32 + if count, _ := fmt.Sscanf(str, "%d:%d", &major, &minor); count != 2 { + return 0, errors.Errorf("invalid device number string %q", str) + } + return DeviceNumber(unix.Mkdev(major, minor)), nil +} + +// getDeviceNumber returns the device number of the device node at the given +// path. If there is a symlink at the path, it is dereferenced. +func getDeviceNumber(path string) (DeviceNumber, error) { + var stat unix.Stat_t + if err := unix.Stat(path, &stat); err != nil { + return 0, err + } + return DeviceNumber(stat.Rdev), nil +} + +// getNumberOfContainingDevice returns the device number of the filesystem which +// contains the given file. If the file is a symlink, it is not dereferenced. +func getNumberOfContainingDevice(path string) (DeviceNumber, error) { + var stat unix.Stat_t + if err := unix.Lstat(path, &stat); err != nil { + return 0, err + } + return DeviceNumber(stat.Dev), nil +} diff --git a/vendor/github.com/google/fscrypt/keyring/fs_keyring.go b/vendor/github.com/google/fscrypt/keyring/fs_keyring.go new file mode 100644 index 00000000000..9b949b9ea18 --- /dev/null +++ b/vendor/github.com/google/fscrypt/keyring/fs_keyring.go @@ -0,0 +1,326 @@ +/* + * fs_keyring.go - Add/remove encryption policy keys to/from filesystem + * + * Copyright 2019 Google LLC + * Author: Eric Biggers (ebiggers@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package keyring + +/* +#include +*/ +import "C" + +import ( + "encoding/hex" + "log" + "os" + "os/user" + "sync" + "unsafe" + + "github.com/pkg/errors" + "golang.org/x/sys/unix" + + "github.com/google/fscrypt/crypto" + "github.com/google/fscrypt/filesystem" + "github.com/google/fscrypt/security" + "github.com/google/fscrypt/util" +) + +var ( + fsKeyringSupported bool + fsKeyringSupportedKnown bool + fsKeyringSupportedLock sync.Mutex +) + +func checkForFsKeyringSupport(mount *filesystem.Mount) bool { + dir, err := os.Open(mount.Path) + if err != nil { + log.Printf("Unexpected error opening %q. Assuming filesystem keyring is unsupported.", + mount.Path) + return false + } + defer dir.Close() + + // FS_IOC_ADD_ENCRYPTION_KEY with a NULL argument will fail with ENOTTY + // if the ioctl isn't supported. Otherwise it should fail with EFAULT. + // + // Note that there's no need to check for FS_IOC_REMOVE_ENCRYPTION_KEY + // support separately, since it's guaranteed to be available if + // FS_IOC_ADD_ENCRYPTION_KEY is. There's also no need to check for + // support on every filesystem separately, since either the kernel + // supports the ioctls on all fscrypt-capable filesystems or it doesn't. + _, _, errno := unix.Syscall(unix.SYS_IOCTL, dir.Fd(), unix.FS_IOC_ADD_ENCRYPTION_KEY, 0) + if errno == unix.ENOTTY { + log.Printf("Kernel doesn't support filesystem keyring. Falling back to user keyring.") + return false + } + if errno == unix.EFAULT { + log.Printf("Detected support for filesystem keyring") + } else { + // EFAULT is expected, but as long as we didn't get ENOTTY the + // ioctl should be available. + log.Printf("Unexpected error from FS_IOC_ADD_ENCRYPTION_KEY(%q, NULL): %v", mount.Path, errno) + } + return true +} + +// IsFsKeyringSupported returns true if the kernel supports the ioctls to +// add/remove fscrypt keys directly to/from the filesystem. For support to be +// detected, the given Mount must be for a filesystem that supports fscrypt. +func IsFsKeyringSupported(mount *filesystem.Mount) bool { + fsKeyringSupportedLock.Lock() + defer fsKeyringSupportedLock.Unlock() + if !fsKeyringSupportedKnown { + fsKeyringSupported = checkForFsKeyringSupport(mount) + fsKeyringSupportedKnown = true + } + return fsKeyringSupported +} + +// buildKeySpecifier converts the key descriptor string to an FscryptKeySpecifier. +func buildKeySpecifier(spec *unix.FscryptKeySpecifier, descriptor string) error { + descriptorBytes, err := hex.DecodeString(descriptor) + if err != nil { + return errors.Errorf("key descriptor %q is invalid", descriptor) + } + switch len(descriptorBytes) { + case unix.FSCRYPT_KEY_DESCRIPTOR_SIZE: + spec.Type = unix.FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR + case unix.FSCRYPT_KEY_IDENTIFIER_SIZE: + spec.Type = unix.FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER + default: + return errors.Errorf("key descriptor %q has unknown length", descriptor) + } + copy(spec.U[:], descriptorBytes) + return nil +} + +type savedPrivs struct { + ruid, euid, suid int +} + +// dropPrivsIfNeeded drops privileges (UIDs only) to the given user if we're +// working with a v2 policy key, and if the user is different from the user the +// process is currently running as. +// +// This is needed to change the effective UID so that FS_IOC_ADD_ENCRYPTION_KEY +// and FS_IOC_REMOVE_ENCRYPTION_KEY will add/remove a claim to the key for the +// intended user, and so that FS_IOC_GET_ENCRYPTION_KEY_STATUS will return the +// correct status flags for the user. 
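+//
+// The helpers below use it in the pattern
+//
+//	savedPrivs, err := dropPrivsIfNeeded(user, &arg.Key_spec)
+//	// ... issue the FS_IOC_* ioctl ...
+//	restorePrivs(savedPrivs)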
+func dropPrivsIfNeeded(user *user.User, spec *unix.FscryptKeySpecifier) (*savedPrivs, error) { + if spec.Type == unix.FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR { + // v1 policy keys don't have any concept of user claims. + return nil, nil + } + targetUID := util.AtoiOrPanic(user.Uid) + ruid, euid, suid := security.GetUids() + if euid == targetUID { + return nil, nil + } + if err := security.SetUids(targetUID, targetUID, euid); err != nil { + return nil, err + } + return &savedPrivs{ruid, euid, suid}, nil +} + +// restorePrivs restores root privileges if needed. +func restorePrivs(privs *savedPrivs) error { + if privs != nil { + return security.SetUids(privs.ruid, privs.euid, privs.suid) + } + return nil +} + +// validateKeyDescriptor validates that the correct key descriptor was provided. +// This isn't really necessary; this is just an extra sanity check. +func validateKeyDescriptor(spec *unix.FscryptKeySpecifier, descriptor string) (string, error) { + if spec.Type != unix.FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER { + // v1 policy key: the descriptor is chosen arbitrarily by + // userspace, so there's nothing to validate. + return descriptor, nil + } + // v2 policy key. The descriptor ("identifier" in the kernel UAPI) is + // calculated as a cryptographic hash of the key itself. The kernel + // ignores the provided value, and calculates and returns it itself. So + // verify that the returned value is as expected. If it's not, the key + // doesn't actually match the encryption policy we thought it was for. + actual := hex.EncodeToString(spec.U[:unix.FSCRYPT_KEY_IDENTIFIER_SIZE]) + if descriptor == actual { + return descriptor, nil + } + return actual, + errors.Errorf("provided and actual key descriptors differ (%q != %q)", + descriptor, actual) +} + +// fsAddEncryptionKey adds the specified encryption key to the specified filesystem. +func fsAddEncryptionKey(key *crypto.Key, descriptor string, + mount *filesystem.Mount, user *user.User) error { + + dir, err := os.Open(mount.Path) + if err != nil { + return err + } + defer dir.Close() + + argKey, err := crypto.NewBlankKey(int(unsafe.Sizeof(unix.FscryptAddKeyArg{})) + key.Len()) + if err != nil { + return err + } + defer argKey.Wipe() + arg := (*unix.FscryptAddKeyArg)(argKey.UnsafePtr()) + + if err = buildKeySpecifier(&arg.Key_spec, descriptor); err != nil { + return err + } + + raw := unsafe.Pointer(uintptr(argKey.UnsafePtr()) + unsafe.Sizeof(*arg)) + arg.Raw_size = uint32(key.Len()) + C.memcpy(raw, key.UnsafePtr(), C.size_t(key.Len())) + + savedPrivs, err := dropPrivsIfNeeded(user, &arg.Key_spec) + if err != nil { + return err + } + _, _, errno := unix.Syscall(unix.SYS_IOCTL, dir.Fd(), + unix.FS_IOC_ADD_ENCRYPTION_KEY, uintptr(argKey.UnsafePtr())) + restorePrivs(savedPrivs) + + log.Printf("FS_IOC_ADD_ENCRYPTION_KEY(%q, %s, ) = %v", mount.Path, descriptor, errno) + if errno != 0 { + return errors.Wrapf(errno, + "error adding key with descriptor %s to filesystem %s", + descriptor, mount.Path) + } + if descriptor, err = validateKeyDescriptor(&arg.Key_spec, descriptor); err != nil { + fsRemoveEncryptionKey(descriptor, mount, user) + return err + } + return nil +} + +// fsRemoveEncryptionKey removes the specified encryption key from the specified +// filesystem. 
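+// If user is nil, FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS is issued instead
+// of FS_IOC_REMOVE_ENCRYPTION_KEY, removing all users' claims to the key
+// rather than only the calling user's claim.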
+func fsRemoveEncryptionKey(descriptor string, mount *filesystem.Mount, + user *user.User) error { + + dir, err := os.Open(mount.Path) + if err != nil { + return err + } + defer dir.Close() + + var arg unix.FscryptRemoveKeyArg + if err = buildKeySpecifier(&arg.Key_spec, descriptor); err != nil { + return err + } + + ioc := uintptr(unix.FS_IOC_REMOVE_ENCRYPTION_KEY) + iocName := "FS_IOC_REMOVE_ENCRYPTION_KEY" + var savedPrivs *savedPrivs + if user == nil { + ioc = unix.FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS + iocName = "FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS" + } else { + savedPrivs, err = dropPrivsIfNeeded(user, &arg.Key_spec) + if err != nil { + return err + } + } + _, _, errno := unix.Syscall(unix.SYS_IOCTL, dir.Fd(), ioc, uintptr(unsafe.Pointer(&arg))) + restorePrivs(savedPrivs) + + log.Printf("%s(%q, %s) = %v, removal_status_flags=0x%x", + iocName, mount.Path, descriptor, errno, arg.Removal_status_flags) + switch errno { + case 0: + switch { + case arg.Removal_status_flags&unix.FSCRYPT_KEY_REMOVAL_STATUS_FLAG_OTHER_USERS != 0: + return ErrKeyAddedByOtherUsers + case arg.Removal_status_flags&unix.FSCRYPT_KEY_REMOVAL_STATUS_FLAG_FILES_BUSY != 0: + return ErrKeyFilesOpen + } + return nil + case unix.ENOKEY: + // ENOKEY means either the key is completely missing or that the + // current user doesn't have a claim to it. Distinguish between + // these two cases by getting the key status. + if user != nil { + status, _ := fsGetEncryptionKeyStatus(descriptor, mount, user) + if status == KeyPresentButOnlyOtherUsers { + return ErrKeyAddedByOtherUsers + } + } + return ErrKeyNotPresent + default: + return errors.Wrapf(errno, + "error removing key with descriptor %s from filesystem %s", + descriptor, mount.Path) + } +} + +// fsGetEncryptionKeyStatus gets the status of the specified encryption key on +// the specified filesystem. 
+func fsGetEncryptionKeyStatus(descriptor string, mount *filesystem.Mount, + user *user.User) (KeyStatus, error) { + + dir, err := os.Open(mount.Path) + if err != nil { + return KeyStatusUnknown, err + } + defer dir.Close() + + var arg unix.FscryptGetKeyStatusArg + err = buildKeySpecifier(&arg.Key_spec, descriptor) + if err != nil { + return KeyStatusUnknown, err + } + + savedPrivs, err := dropPrivsIfNeeded(user, &arg.Key_spec) + if err != nil { + return KeyStatusUnknown, err + } + _, _, errno := unix.Syscall(unix.SYS_IOCTL, dir.Fd(), + unix.FS_IOC_GET_ENCRYPTION_KEY_STATUS, uintptr(unsafe.Pointer(&arg))) + restorePrivs(savedPrivs) + + log.Printf("FS_IOC_GET_ENCRYPTION_KEY_STATUS(%q, %s) = %v, status=%d, status_flags=0x%x", + mount.Path, descriptor, errno, arg.Status, arg.Status_flags) + if errno != 0 { + return KeyStatusUnknown, + errors.Wrapf(errno, + "error getting status of key with descriptor %s on filesystem %s", + descriptor, mount.Path) + } + switch arg.Status { + case unix.FSCRYPT_KEY_STATUS_ABSENT: + return KeyAbsent, nil + case unix.FSCRYPT_KEY_STATUS_PRESENT: + if arg.Key_spec.Type != unix.FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR && + (arg.Status_flags&unix.FSCRYPT_KEY_STATUS_FLAG_ADDED_BY_SELF) == 0 { + return KeyPresentButOnlyOtherUsers, nil + } + return KeyPresent, nil + case unix.FSCRYPT_KEY_STATUS_INCOMPLETELY_REMOVED: + return KeyAbsentButFilesBusy, nil + default: + return KeyStatusUnknown, + errors.Errorf("unknown key status (%d) for key with descriptor %s on filesystem %s", + arg.Status, descriptor, mount.Path) + } +} diff --git a/vendor/github.com/google/fscrypt/keyring/keyring.go b/vendor/github.com/google/fscrypt/keyring/keyring.go new file mode 100644 index 00000000000..5ddceaf8b7c --- /dev/null +++ b/vendor/github.com/google/fscrypt/keyring/keyring.go @@ -0,0 +1,175 @@ +/* + * keyring.go - Add/remove encryption policy keys to/from kernel + * + * Copyright 2019 Google LLC + * Author: Eric Biggers (ebiggers@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +// Package keyring manages adding, removing, and getting the status of +// encryption policy keys to/from the kernel. Most public functions are in +// keyring.go, and they delegate to either user_keyring.go or fs_keyring.go, +// depending on whether a user keyring or a filesystem keyring is being used. +// +// v2 encryption policies always use the filesystem keyring. +// v1 policies use the user keyring by default, but can be configured to use the +// filesystem keyring instead (requires root and kernel v5.4+). 
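+//
+// Illustrative use of this package (mnt, usr, policyKey and descriptor are
+// assumed to come from the caller):
+//
+//	options := &keyring.Options{Mount: mnt, User: usr}
+//	err := keyring.AddEncryptionKey(policyKey, descriptor, options)
+//	status, err := keyring.GetEncryptionKeyStatus(descriptor, options)
+//	err = keyring.RemoveEncryptionKey(descriptor, options, false)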
+package keyring + +import ( + "encoding/hex" + "os/user" + "strconv" + + "github.com/pkg/errors" + "golang.org/x/sys/unix" + + "github.com/google/fscrypt/crypto" + "github.com/google/fscrypt/filesystem" + "github.com/google/fscrypt/metadata" + "github.com/google/fscrypt/util" +) + +// Keyring error values +var ( + ErrKeyAddedByOtherUsers = errors.New("other users have added the key too") + ErrKeyFilesOpen = errors.New("some files using the key are still open") + ErrKeyNotPresent = errors.New("key not present or already removed") + ErrV2PoliciesUnsupported = errors.New("kernel is too old to support v2 encryption policies") +) + +// Options are the options which specify *which* keyring the key should be +// added/removed/gotten to, and how. +type Options struct { + // Mount is the filesystem to which the key should be + // added/removed/gotten. + Mount *filesystem.Mount + // User is the user for whom the key should be added/removed/gotten. + User *user.User + // UseFsKeyringForV1Policies is true if keys for v1 encryption policies + // should be put in the filesystem's keyring (if supported) rather than + // in the user's keyring. Note that this makes AddEncryptionKey and + // RemoveEncryptionKey require root privileges. + UseFsKeyringForV1Policies bool +} + +func shouldUseFsKeyring(descriptor string, options *Options) (bool, error) { + // For v1 encryption policy keys, use the filesystem keyring if + // use_fs_keyring_for_v1_policies is set in /etc/fscrypt.conf and the + // kernel supports it. + if len(descriptor) == hex.EncodedLen(unix.FSCRYPT_KEY_DESCRIPTOR_SIZE) { + return options.UseFsKeyringForV1Policies && IsFsKeyringSupported(options.Mount), nil + } + // For v2 encryption policy keys, always use the filesystem keyring; the + // kernel doesn't support any other way. + if !IsFsKeyringSupported(options.Mount) { + return true, ErrV2PoliciesUnsupported + } + return true, nil +} + +// buildKeyDescription builds the description for an fscrypt key of type +// "logon". For ext4 and f2fs, it uses the legacy filesystem-specific prefixes +// for compatibility with kernels before v4.8 and v4.6 respectively. For other +// filesystems it uses the generic prefix "fscrypt". +func buildKeyDescription(options *Options, descriptor string) string { + switch options.Mount.FilesystemType { + case "ext4", "f2fs": + return options.Mount.FilesystemType + ":" + descriptor + default: + return unix.FSCRYPT_KEY_DESC_PREFIX + descriptor + } +} + +// AddEncryptionKey adds an encryption policy key to a kernel keyring. It uses +// either the filesystem keyring for the target Mount or the user keyring for +// the target User. +func AddEncryptionKey(key *crypto.Key, descriptor string, options *Options) error { + if err := util.CheckValidLength(metadata.PolicyKeyLen, key.Len()); err != nil { + return errors.Wrap(err, "policy key") + } + useFsKeyring, err := shouldUseFsKeyring(descriptor, options) + if err != nil { + return err + } + if useFsKeyring { + return fsAddEncryptionKey(key, descriptor, options.Mount, options.User) + } + return userAddKey(key, buildKeyDescription(options, descriptor), options.User) +} + +// RemoveEncryptionKey removes an encryption policy key from a kernel keyring. +// It uses either the filesystem keyring for the target Mount or the user +// keyring for the target User. 
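+// If allUsers is true and the filesystem keyring is in use, all users'
+// claims to the key are removed (via FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS,
+// which typically requires root); otherwise only the target user's claim is
+// removed.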
+func RemoveEncryptionKey(descriptor string, options *Options, allUsers bool) error { + useFsKeyring, err := shouldUseFsKeyring(descriptor, options) + if err != nil { + return err + } + if useFsKeyring { + user := options.User + if allUsers { + user = nil + } + return fsRemoveEncryptionKey(descriptor, options.Mount, user) + } + return userRemoveKey(buildKeyDescription(options, descriptor), options.User) +} + +// KeyStatus is an enum that represents the status of a key in a kernel keyring. +type KeyStatus int + +// The possible values of KeyStatus. +const ( + KeyStatusUnknown = 0 + iota + KeyAbsent + KeyAbsentButFilesBusy + KeyPresent + KeyPresentButOnlyOtherUsers +) + +func (status KeyStatus) String() string { + switch status { + case KeyStatusUnknown: + return "Unknown" + case KeyAbsent: + return "Absent" + case KeyAbsentButFilesBusy: + return "AbsentButFilesBusy" + case KeyPresent: + return "Present" + case KeyPresentButOnlyOtherUsers: + return "PresentButOnlyOtherUsers" + default: + return strconv.Itoa(int(status)) + } +} + +// GetEncryptionKeyStatus gets the status of an encryption policy key in a +// kernel keyring. It uses either the filesystem keyring for the target Mount +// or the user keyring for the target User. +func GetEncryptionKeyStatus(descriptor string, options *Options) (KeyStatus, error) { + useFsKeyring, err := shouldUseFsKeyring(descriptor, options) + if err != nil { + return KeyStatusUnknown, err + } + if useFsKeyring { + return fsGetEncryptionKeyStatus(descriptor, options.Mount, options.User) + } + _, _, err = userFindKey(buildKeyDescription(options, descriptor), options.User) + if err != nil { + return KeyAbsent, nil + } + return KeyPresent, nil +} diff --git a/vendor/github.com/google/fscrypt/keyring/user_keyring.go b/vendor/github.com/google/fscrypt/keyring/user_keyring.go new file mode 100644 index 00000000000..0ea46895745 --- /dev/null +++ b/vendor/github.com/google/fscrypt/keyring/user_keyring.go @@ -0,0 +1,251 @@ +/* + * user_keyring.go - Add/remove encryption policy keys to/from user keyrings. + * This is the deprecated mechanism; see fs_keyring.go for the new mechanism. + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package keyring + +import ( + "os/user" + "runtime" + "unsafe" + + "github.com/pkg/errors" + "golang.org/x/sys/unix" + + "fmt" + "log" + + "github.com/google/fscrypt/crypto" + "github.com/google/fscrypt/security" + "github.com/google/fscrypt/util" +) + +// ErrAccessUserKeyring indicates that a user's keyring cannot be +// accessed. +type ErrAccessUserKeyring struct { + TargetUser *user.User + UnderlyingError error +} + +func (err *ErrAccessUserKeyring) Error() string { + return fmt.Sprintf("could not access user keyring for %q: %s", + err.TargetUser.Username, err.UnderlyingError) +} + +// ErrSessionUserKeyring indicates that a user's keyring is not linked +// into the session keyring. 
+type ErrSessionUserKeyring struct { + TargetUser *user.User +} + +func (err *ErrSessionUserKeyring) Error() string { + return fmt.Sprintf("user keyring for %q is not linked into the session keyring", + err.TargetUser.Username) +} + +// KeyType is always logon as required by filesystem encryption. +const KeyType = "logon" + +// userAddKey puts the provided policy key into the user keyring for the +// specified user with the provided description, and type logon. +func userAddKey(key *crypto.Key, description string, targetUser *user.User) error { + runtime.LockOSThread() // ensure target user keyring remains possessed in thread keyring + defer runtime.UnlockOSThread() + + // Create our payload (containing an FscryptKey) + payload, err := crypto.NewBlankKey(int(unsafe.Sizeof(unix.FscryptKey{}))) + if err != nil { + return err + } + defer payload.Wipe() + + // Cast the payload to an FscryptKey so we can initialize the fields. + fscryptKey := (*unix.FscryptKey)(payload.UnsafePtr()) + // Mode is ignored by the kernel + fscryptKey.Mode = 0 + fscryptKey.Size = uint32(key.Len()) + copy(fscryptKey.Raw[:], key.Data()) + + keyringID, err := UserKeyringID(targetUser, true) + if err != nil { + return err + } + keyID, err := unix.AddKey(KeyType, description, payload.Data(), keyringID) + log.Printf("KeyctlAddKey(%s, %s, , %d) = %d, %v", + KeyType, description, keyringID, keyID, err) + if err != nil { + return errors.Wrapf(err, + "error adding key with description %s to user keyring for %q", + description, targetUser.Username) + } + return nil +} + +// userRemoveKey tries to remove a policy key from the user keyring with the +// provided description. An error is returned if the key does not exist. +func userRemoveKey(description string, targetUser *user.User) error { + runtime.LockOSThread() // ensure target user keyring remains possessed in thread keyring + defer runtime.UnlockOSThread() + + keyID, keyringID, err := userFindKey(description, targetUser) + if err != nil { + return ErrKeyNotPresent + } + + _, err = unix.KeyctlInt(unix.KEYCTL_UNLINK, keyID, keyringID, 0, 0) + log.Printf("KeyctlUnlink(%d, %d) = %v", keyID, keyringID, err) + if err != nil { + return errors.Wrapf(err, + "error removing key with description %s from user keyring for %q", + description, targetUser.Username) + } + return nil +} + +// userFindKey tries to locate a key with the provided description in the user +// keyring for the target user. The key ID and keyring ID are returned if we can +// find the key. An error is returned if the key does not exist. +func userFindKey(description string, targetUser *user.User) (int, int, error) { + runtime.LockOSThread() // ensure target user keyring remains possessed in thread keyring + defer runtime.UnlockOSThread() + + keyringID, err := UserKeyringID(targetUser, false) + if err != nil { + return 0, 0, err + } + + keyID, err := unix.KeyctlSearch(keyringID, KeyType, description, 0) + log.Printf("KeyctlSearch(%d, %s, %s) = %d, %v", keyringID, KeyType, description, keyID, err) + if err != nil { + return 0, 0, errors.Wrapf(err, + "error searching for key %s in user keyring for %q", + description, targetUser.Username) + } + return keyID, keyringID, err +} + +// UserKeyringID returns the key id of the target user's user keyring. We also +// ensure that the keyring will be accessible by linking it into the thread +// keyring and linking it into the root user keyring (permissions allowing). 
If +// checkSession is true, an error is returned if a normal user requests their +// user keyring, but it is not in the current session keyring. +func UserKeyringID(targetUser *user.User, checkSession bool) (int, error) { + runtime.LockOSThread() // ensure target user keyring remains possessed in thread keyring + defer runtime.UnlockOSThread() + + uid := util.AtoiOrPanic(targetUser.Uid) + targetKeyring, err := userKeyringIDLookup(uid) + if err != nil { + return 0, &ErrAccessUserKeyring{targetUser, err} + } + + if !util.IsUserRoot() { + // Make sure the returned keyring will be accessible by checking + // that it is in the session keyring. + if checkSession && !isUserKeyringInSession(uid) { + return 0, &ErrSessionUserKeyring{targetUser} + } + return targetKeyring, nil + } + + // Make sure the returned keyring will be accessible by linking it into + // the root user's user keyring (which will not be garbage collected). + rootKeyring, err := userKeyringIDLookup(0) + if err != nil { + return 0, errors.Wrapf(err, "error looking up root's user keyring") + } + + if rootKeyring != targetKeyring { + if err = keyringLink(targetKeyring, rootKeyring); err != nil { + return 0, errors.Wrapf(err, + "error linking user keyring for %q into root's user keyring", + targetUser.Username) + } + } + return targetKeyring, nil +} + +func userKeyringIDLookup(uid int) (keyringID int, err error) { + + // Our goals here are to: + // - Find the user keyring (for the provided uid) + // - Link it into the current thread keyring (so we can use it) + // - Make no permanent changes to the process privileges + // Complicating this are the facts that: + // - The value of KEY_SPEC_USER_KEYRING is determined by the ruid + // - Keyring linking permissions use the euid + // So we have to change both the ruid and euid to make this work, + // setting the suid to 0 so that we can later switch back. + ruid, euid, suid := security.GetUids() + if ruid != uid || euid != uid { + if err = security.SetUids(uid, uid, 0); err != nil { + return + } + defer func() { + resetErr := security.SetUids(ruid, euid, suid) + if resetErr != nil { + err = resetErr + } + }() + } + + // We get the value of KEY_SPEC_USER_KEYRING. Note that this will also + // trigger the creation of the uid keyring if it does not yet exist. + keyringID, err = unix.KeyctlGetKeyringID(unix.KEY_SPEC_USER_KEYRING, true) + log.Printf("keyringID(_uid.%d) = %d, %v", uid, keyringID, err) + if err != nil { + return 0, err + } + + // We still want to use this keyring after our privileges are reset. So + // we link it into the thread keyring, preventing a loss of access. + // + // We must be under LockOSThread() for this to work reliably. Note that + // we can't just use the process keyring, since it doesn't work reliably + // in Go programs, due to the Go runtime creating threads before the + // program starts and has a chance to create the process keyring. + if err = keyringLink(keyringID, unix.KEY_SPEC_THREAD_KEYRING); err != nil { + return 0, err + } + + return keyringID, nil +} + +// isUserKeyringInSession tells us if the user's uid keyring is in the current +// session keyring. +func isUserKeyringInSession(uid int) bool { + // We cannot use unix.KEY_SPEC_SESSION_KEYRING directly as that might + // create a session keyring if one does not exist. 
+ sessionKeyring, err := unix.KeyctlGetKeyringID(unix.KEY_SPEC_SESSION_KEYRING, false) + log.Printf("keyringID(session) = %d, %v", sessionKeyring, err) + if err != nil { + return false + } + + description := fmt.Sprintf("_uid.%d", uid) + id, err := unix.KeyctlSearch(sessionKeyring, "keyring", description, 0) + log.Printf("KeyctlSearch(%d, keyring, %s) = %d, %v", sessionKeyring, description, id, err) + return err == nil +} + +func keyringLink(keyID int, keyringID int) error { + _, err := unix.KeyctlInt(unix.KEYCTL_LINK, keyID, keyringID, 0, 0) + log.Printf("KeyctlLink(%d, %d) = %v", keyID, keyringID, err) + return err +} diff --git a/vendor/github.com/google/fscrypt/metadata/checks.go b/vendor/github.com/google/fscrypt/metadata/checks.go new file mode 100644 index 00000000000..84fd208c863 --- /dev/null +++ b/vendor/github.com/google/fscrypt/metadata/checks.go @@ -0,0 +1,221 @@ +/* + * checks.go - Some sanity check methods for our metadata structures + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package metadata + +import ( + "github.com/golang/protobuf/proto" + "github.com/pkg/errors" + + "github.com/google/fscrypt/util" +) + +var errNotInitialized = errors.New("not initialized") + +// Metadata is the interface to all of the protobuf structures that can be +// checked for validity. +type Metadata interface { + CheckValidity() error + proto.Message +} + +// CheckValidity ensures the mode has a name and isn't empty. +func (m EncryptionOptions_Mode) CheckValidity() error { + if m == EncryptionOptions_default { + return errNotInitialized + } + if m.String() == "" { + return errors.Errorf("unknown %d", m) + } + return nil +} + +// CheckValidity ensures the source has a name and isn't empty. +func (s SourceType) CheckValidity() error { + if s == SourceType_default { + return errNotInitialized + } + if s.String() == "" { + return errors.Errorf("unknown %d", s) + } + return nil +} + +// CheckValidity ensures the hash costs will be accepted by Argon2. +func (h *HashingCosts) CheckValidity() error { + if h == nil { + return errNotInitialized + } + if h.Time <= 0 { + return errors.Errorf("time=%d is not positive", h.Time) + } + if h.Parallelism <= 0 { + return errors.Errorf("parallelism=%d is not positive", h.Parallelism) + } + minMemory := 8 * h.Parallelism + if h.Memory < minMemory { + return errors.Errorf("memory=%d is less than minimum (%d)", h.Memory, minMemory) + } + return nil +} + +// CheckValidity ensures our buffers are the correct length. +func (w *WrappedKeyData) CheckValidity() error { + if w == nil { + return errNotInitialized + } + if len(w.EncryptedKey) == 0 { + return errors.Wrap(errNotInitialized, "encrypted key") + } + if err := util.CheckValidLength(IVLen, len(w.IV)); err != nil { + return errors.Wrap(err, "IV") + } + return errors.Wrap(util.CheckValidLength(HMACLen, len(w.Hmac)), "HMAC") +} + +// CheckValidity ensures our ProtectorData has the correct fields for its source. 
+func (p *ProtectorData) CheckValidity() error { + if p == nil { + return errNotInitialized + } + + if err := p.Source.CheckValidity(); err != nil { + return errors.Wrap(err, "protector source") + } + + // Source specific checks + switch p.Source { + case SourceType_pam_passphrase: + if p.Uid < 0 { + return errors.Errorf("UID=%d is negative", p.Uid) + } + fallthrough + case SourceType_custom_passphrase: + if err := p.Costs.CheckValidity(); err != nil { + return errors.Wrap(err, "passphrase hashing costs") + } + if err := util.CheckValidLength(SaltLen, len(p.Salt)); err != nil { + return errors.Wrap(err, "passphrase hashing salt") + } + } + + // Generic checks + if err := p.WrappedKey.CheckValidity(); err != nil { + return errors.Wrap(err, "wrapped protector key") + } + if err := util.CheckValidLength(ProtectorDescriptorLen, len(p.ProtectorDescriptor)); err != nil { + return errors.Wrap(err, "protector descriptor") + + } + err := util.CheckValidLength(InternalKeyLen, len(p.WrappedKey.EncryptedKey)) + return errors.Wrap(err, "encrypted protector key") +} + +// CheckValidity ensures each of the options is valid. +func (e *EncryptionOptions) CheckValidity() error { + if e == nil { + return errNotInitialized + } + if _, ok := util.Index(e.Padding, paddingArray); !ok { + return errors.Errorf("padding of %d is invalid", e.Padding) + } + if err := e.Contents.CheckValidity(); err != nil { + return errors.Wrap(err, "contents encryption mode") + } + if err := e.Filenames.CheckValidity(); err != nil { + return errors.Wrap(err, "filenames encryption mode") + } + // If PolicyVersion is unset, treat it as 1. + if e.PolicyVersion == 0 { + e.PolicyVersion = 1 + } + if e.PolicyVersion != 1 && e.PolicyVersion != 2 { + return errors.Errorf("policy version of %d is invalid", e.PolicyVersion) + } + return nil +} + +// CheckValidity ensures the fields are valid and have the correct lengths. +func (w *WrappedPolicyKey) CheckValidity() error { + if w == nil { + return errNotInitialized + } + if err := w.WrappedKey.CheckValidity(); err != nil { + return errors.Wrap(err, "wrapped key") + } + if err := util.CheckValidLength(PolicyKeyLen, len(w.WrappedKey.EncryptedKey)); err != nil { + return errors.Wrap(err, "encrypted key") + } + err := util.CheckValidLength(ProtectorDescriptorLen, len(w.ProtectorDescriptor)) + return errors.Wrap(err, "wrapping protector descriptor") +} + +// CheckValidity ensures the fields and each wrapped key are valid. +func (p *PolicyData) CheckValidity() error { + if p == nil { + return errNotInitialized + } + // Check each wrapped key + for i, w := range p.WrappedPolicyKeys { + if err := w.CheckValidity(); err != nil { + return errors.Wrapf(err, "policy key slot %d", i) + } + } + + if err := p.Options.CheckValidity(); err != nil { + return errors.Wrap(err, "policy options") + } + + var expectedLen int + switch p.Options.PolicyVersion { + case 1: + expectedLen = PolicyDescriptorLenV1 + case 2: + expectedLen = PolicyDescriptorLenV2 + default: + return errors.Errorf("policy version of %d is invalid", p.Options.PolicyVersion) + } + + if err := util.CheckValidLength(expectedLen, len(p.KeyDescriptor)); err != nil { + return errors.Wrap(err, "policy key descriptor") + } + + return nil +} + +// CheckValidity ensures the Config has all the necessary info for its Source. 
+func (c *Config) CheckValidity() error { + // General checks + if c == nil { + return errNotInitialized + } + if err := c.Source.CheckValidity(); err != nil { + return errors.Wrap(err, "default config source") + } + + // Source specific checks + switch c.Source { + case SourceType_pam_passphrase, SourceType_custom_passphrase: + if err := c.HashCosts.CheckValidity(); err != nil { + return errors.Wrap(err, "config hashing costs") + } + } + + return errors.Wrap(c.Options.CheckValidity(), "config options") +} diff --git a/vendor/github.com/google/fscrypt/metadata/config.go b/vendor/github.com/google/fscrypt/metadata/config.go new file mode 100644 index 00000000000..b3c872693ea --- /dev/null +++ b/vendor/github.com/google/fscrypt/metadata/config.go @@ -0,0 +1,59 @@ +/* + * config.go - Parsing for our global config file. The file is simply the JSON + * output of the Config protocol buffer. + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +// Package metadata contains all of the on disk structures. +// These structures are defined in metadata.proto. The package also +// contains functions for manipulating these structures, specifically: +// * Reading and Writing the Config file to disk +// * Getting and Setting Policies for directories +// * Reasonable defaults for a Policy's EncryptionOptions +package metadata + +import ( + "io" + + "github.com/golang/protobuf/jsonpb" +) + +// WriteConfig outputs the Config data as nicely formatted JSON +func WriteConfig(config *Config, out io.Writer) error { + m := jsonpb.Marshaler{ + EmitDefaults: true, + EnumsAsInts: false, + Indent: "\t", + OrigName: true, + } + if err := m.Marshal(out, config); err != nil { + return err + } + + _, err := out.Write([]byte{'\n'}) + return err +} + +// ReadConfig writes the JSON data into the config structure +func ReadConfig(in io.Reader) (*Config, error) { + config := new(Config) + // Allow (and ignore) unknown fields for forwards compatibility. + u := jsonpb.Unmarshaler{ + AllowUnknownFields: true, + } + return config, u.Unmarshal(in, config) +} diff --git a/vendor/github.com/google/fscrypt/metadata/constants.go b/vendor/github.com/google/fscrypt/metadata/constants.go new file mode 100644 index 00000000000..fa6b8a7596b --- /dev/null +++ b/vendor/github.com/google/fscrypt/metadata/constants.go @@ -0,0 +1,57 @@ +/* + * constants.go - Some metadata constants used throughout fscrypt + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package metadata + +import ( + "crypto/sha256" + + "golang.org/x/sys/unix" +) + +// Lengths for our keys, buffers, and strings used in fscrypt. +const ( + // Length of policy descriptor (in hex chars) for v1 encryption policies + PolicyDescriptorLenV1 = 2 * unix.FSCRYPT_KEY_DESCRIPTOR_SIZE + // Length of protector descriptor (in hex chars) + ProtectorDescriptorLen = PolicyDescriptorLenV1 + // Length of policy descriptor (in hex chars) for v2 encryption policies + PolicyDescriptorLenV2 = 2 * unix.FSCRYPT_KEY_IDENTIFIER_SIZE + // We always use 256-bit keys internally (compared to 512-bit policy keys). + InternalKeyLen = 32 + IVLen = 16 + SaltLen = 16 + // We use SHA256 for the HMAC, and len(HMAC) == len(hash size). + HMACLen = sha256.Size + // PolicyKeyLen is the length of all keys passed directly to the Keyring + PolicyKeyLen = unix.FSCRYPT_MAX_KEY_SIZE +) + +var ( + // DefaultOptions use the supported encryption modes, max padding, and + // policy version 1. + DefaultOptions = &EncryptionOptions{ + Padding: 32, + Contents: EncryptionOptions_AES_256_XTS, + Filenames: EncryptionOptions_AES_256_CTS, + PolicyVersion: 1, + } + // DefaultSource is the source we use if none is specified. + DefaultSource = SourceType_custom_passphrase +) diff --git a/vendor/github.com/google/fscrypt/metadata/metadata.pb.go b/vendor/github.com/google/fscrypt/metadata/metadata.pb.go new file mode 100644 index 00000000000..67098043cad --- /dev/null +++ b/vendor/github.com/google/fscrypt/metadata/metadata.pb.go @@ -0,0 +1,589 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: metadata/metadata.proto + +package metadata + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Specifies the method in which an outside secret is obtained for a Protector +type SourceType int32 + +const ( + SourceType_default SourceType = 0 + SourceType_pam_passphrase SourceType = 1 + SourceType_custom_passphrase SourceType = 2 + SourceType_raw_key SourceType = 3 +) + +var SourceType_name = map[int32]string{ + 0: "default", + 1: "pam_passphrase", + 2: "custom_passphrase", + 3: "raw_key", +} +var SourceType_value = map[string]int32{ + "default": 0, + "pam_passphrase": 1, + "custom_passphrase": 2, + "raw_key": 3, +} + +func (x SourceType) String() string { + return proto.EnumName(SourceType_name, int32(x)) +} +func (SourceType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_metadata_31965d2849cb292a, []int{0} +} + +// Type of encryption; should match declarations of unix.FSCRYPT_MODE +type EncryptionOptions_Mode int32 + +const ( + EncryptionOptions_default EncryptionOptions_Mode = 0 + EncryptionOptions_AES_256_XTS EncryptionOptions_Mode = 1 + EncryptionOptions_AES_256_GCM EncryptionOptions_Mode = 2 + EncryptionOptions_AES_256_CBC EncryptionOptions_Mode = 3 + EncryptionOptions_AES_256_CTS EncryptionOptions_Mode = 4 + EncryptionOptions_AES_128_CBC EncryptionOptions_Mode = 5 + EncryptionOptions_AES_128_CTS EncryptionOptions_Mode = 6 + EncryptionOptions_Adiantum EncryptionOptions_Mode = 9 +) + +var EncryptionOptions_Mode_name = map[int32]string{ + 0: "default", + 1: "AES_256_XTS", + 2: "AES_256_GCM", + 3: "AES_256_CBC", + 4: "AES_256_CTS", + 5: "AES_128_CBC", + 6: "AES_128_CTS", + 9: "Adiantum", +} +var EncryptionOptions_Mode_value = map[string]int32{ + "default": 0, + "AES_256_XTS": 1, + "AES_256_GCM": 2, + "AES_256_CBC": 3, + "AES_256_CTS": 4, + "AES_128_CBC": 5, + "AES_128_CTS": 6, + "Adiantum": 9, +} + +func (x EncryptionOptions_Mode) String() string { + return proto.EnumName(EncryptionOptions_Mode_name, int32(x)) +} +func (EncryptionOptions_Mode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_metadata_31965d2849cb292a, []int{3, 0} +} + +// Cost parameters to be used in our hashing functions. 
+type HashingCosts struct { + Time int64 `protobuf:"varint,2,opt,name=time,proto3" json:"time,omitempty"` + Memory int64 `protobuf:"varint,3,opt,name=memory,proto3" json:"memory,omitempty"` + Parallelism int64 `protobuf:"varint,4,opt,name=parallelism,proto3" json:"parallelism,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HashingCosts) Reset() { *m = HashingCosts{} } +func (m *HashingCosts) String() string { return proto.CompactTextString(m) } +func (*HashingCosts) ProtoMessage() {} +func (*HashingCosts) Descriptor() ([]byte, []int) { + return fileDescriptor_metadata_31965d2849cb292a, []int{0} +} +func (m *HashingCosts) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HashingCosts.Unmarshal(m, b) +} +func (m *HashingCosts) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HashingCosts.Marshal(b, m, deterministic) +} +func (dst *HashingCosts) XXX_Merge(src proto.Message) { + xxx_messageInfo_HashingCosts.Merge(dst, src) +} +func (m *HashingCosts) XXX_Size() int { + return xxx_messageInfo_HashingCosts.Size(m) +} +func (m *HashingCosts) XXX_DiscardUnknown() { + xxx_messageInfo_HashingCosts.DiscardUnknown(m) +} + +var xxx_messageInfo_HashingCosts proto.InternalMessageInfo + +func (m *HashingCosts) GetTime() int64 { + if m != nil { + return m.Time + } + return 0 +} + +func (m *HashingCosts) GetMemory() int64 { + if m != nil { + return m.Memory + } + return 0 +} + +func (m *HashingCosts) GetParallelism() int64 { + if m != nil { + return m.Parallelism + } + return 0 +} + +// This structure is used for our authenticated wrapping/unwrapping of keys. +type WrappedKeyData struct { + IV []byte `protobuf:"bytes,1,opt,name=IV,proto3" json:"IV,omitempty"` + EncryptedKey []byte `protobuf:"bytes,2,opt,name=encrypted_key,json=encryptedKey,proto3" json:"encrypted_key,omitempty"` + Hmac []byte `protobuf:"bytes,3,opt,name=hmac,proto3" json:"hmac,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WrappedKeyData) Reset() { *m = WrappedKeyData{} } +func (m *WrappedKeyData) String() string { return proto.CompactTextString(m) } +func (*WrappedKeyData) ProtoMessage() {} +func (*WrappedKeyData) Descriptor() ([]byte, []int) { + return fileDescriptor_metadata_31965d2849cb292a, []int{1} +} +func (m *WrappedKeyData) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WrappedKeyData.Unmarshal(m, b) +} +func (m *WrappedKeyData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WrappedKeyData.Marshal(b, m, deterministic) +} +func (dst *WrappedKeyData) XXX_Merge(src proto.Message) { + xxx_messageInfo_WrappedKeyData.Merge(dst, src) +} +func (m *WrappedKeyData) XXX_Size() int { + return xxx_messageInfo_WrappedKeyData.Size(m) +} +func (m *WrappedKeyData) XXX_DiscardUnknown() { + xxx_messageInfo_WrappedKeyData.DiscardUnknown(m) +} + +var xxx_messageInfo_WrappedKeyData proto.InternalMessageInfo + +func (m *WrappedKeyData) GetIV() []byte { + if m != nil { + return m.IV + } + return nil +} + +func (m *WrappedKeyData) GetEncryptedKey() []byte { + if m != nil { + return m.EncryptedKey + } + return nil +} + +func (m *WrappedKeyData) GetHmac() []byte { + if m != nil { + return m.Hmac + } + return nil +} + +// The associated data for each protector +type ProtectorData struct { + ProtectorDescriptor string 
`protobuf:"bytes,1,opt,name=protector_descriptor,json=protectorDescriptor,proto3" json:"protector_descriptor,omitempty"` + Source SourceType `protobuf:"varint,2,opt,name=source,proto3,enum=metadata.SourceType" json:"source,omitempty"` + // These are only used by some of the protector types + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + Costs *HashingCosts `protobuf:"bytes,4,opt,name=costs,proto3" json:"costs,omitempty"` + Salt []byte `protobuf:"bytes,5,opt,name=salt,proto3" json:"salt,omitempty"` + Uid int64 `protobuf:"varint,6,opt,name=uid,proto3" json:"uid,omitempty"` + WrappedKey *WrappedKeyData `protobuf:"bytes,7,opt,name=wrapped_key,json=wrappedKey,proto3" json:"wrapped_key,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProtectorData) Reset() { *m = ProtectorData{} } +func (m *ProtectorData) String() string { return proto.CompactTextString(m) } +func (*ProtectorData) ProtoMessage() {} +func (*ProtectorData) Descriptor() ([]byte, []int) { + return fileDescriptor_metadata_31965d2849cb292a, []int{2} +} +func (m *ProtectorData) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProtectorData.Unmarshal(m, b) +} +func (m *ProtectorData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProtectorData.Marshal(b, m, deterministic) +} +func (dst *ProtectorData) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProtectorData.Merge(dst, src) +} +func (m *ProtectorData) XXX_Size() int { + return xxx_messageInfo_ProtectorData.Size(m) +} +func (m *ProtectorData) XXX_DiscardUnknown() { + xxx_messageInfo_ProtectorData.DiscardUnknown(m) +} + +var xxx_messageInfo_ProtectorData proto.InternalMessageInfo + +func (m *ProtectorData) GetProtectorDescriptor() string { + if m != nil { + return m.ProtectorDescriptor + } + return "" +} + +func (m *ProtectorData) GetSource() SourceType { + if m != nil { + return m.Source + } + return SourceType_default +} + +func (m *ProtectorData) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ProtectorData) GetCosts() *HashingCosts { + if m != nil { + return m.Costs + } + return nil +} + +func (m *ProtectorData) GetSalt() []byte { + if m != nil { + return m.Salt + } + return nil +} + +func (m *ProtectorData) GetUid() int64 { + if m != nil { + return m.Uid + } + return 0 +} + +func (m *ProtectorData) GetWrappedKey() *WrappedKeyData { + if m != nil { + return m.WrappedKey + } + return nil +} + +// Encryption policy specifics, corresponds to the fscrypt_policy struct +type EncryptionOptions struct { + Padding int64 `protobuf:"varint,1,opt,name=padding,proto3" json:"padding,omitempty"` + Contents EncryptionOptions_Mode `protobuf:"varint,2,opt,name=contents,proto3,enum=metadata.EncryptionOptions_Mode" json:"contents,omitempty"` + Filenames EncryptionOptions_Mode `protobuf:"varint,3,opt,name=filenames,proto3,enum=metadata.EncryptionOptions_Mode" json:"filenames,omitempty"` + PolicyVersion int64 `protobuf:"varint,4,opt,name=policy_version,json=policyVersion,proto3" json:"policy_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EncryptionOptions) Reset() { *m = EncryptionOptions{} } +func (m *EncryptionOptions) String() string { return proto.CompactTextString(m) } +func (*EncryptionOptions) ProtoMessage() {} +func (*EncryptionOptions) Descriptor() ([]byte, []int) { + return 
fileDescriptor_metadata_31965d2849cb292a, []int{3} +} +func (m *EncryptionOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EncryptionOptions.Unmarshal(m, b) +} +func (m *EncryptionOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EncryptionOptions.Marshal(b, m, deterministic) +} +func (dst *EncryptionOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EncryptionOptions.Merge(dst, src) +} +func (m *EncryptionOptions) XXX_Size() int { + return xxx_messageInfo_EncryptionOptions.Size(m) +} +func (m *EncryptionOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EncryptionOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EncryptionOptions proto.InternalMessageInfo + +func (m *EncryptionOptions) GetPadding() int64 { + if m != nil { + return m.Padding + } + return 0 +} + +func (m *EncryptionOptions) GetContents() EncryptionOptions_Mode { + if m != nil { + return m.Contents + } + return EncryptionOptions_default +} + +func (m *EncryptionOptions) GetFilenames() EncryptionOptions_Mode { + if m != nil { + return m.Filenames + } + return EncryptionOptions_default +} + +func (m *EncryptionOptions) GetPolicyVersion() int64 { + if m != nil { + return m.PolicyVersion + } + return 0 +} + +type WrappedPolicyKey struct { + ProtectorDescriptor string `protobuf:"bytes,1,opt,name=protector_descriptor,json=protectorDescriptor,proto3" json:"protector_descriptor,omitempty"` + WrappedKey *WrappedKeyData `protobuf:"bytes,2,opt,name=wrapped_key,json=wrappedKey,proto3" json:"wrapped_key,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WrappedPolicyKey) Reset() { *m = WrappedPolicyKey{} } +func (m *WrappedPolicyKey) String() string { return proto.CompactTextString(m) } +func (*WrappedPolicyKey) ProtoMessage() {} +func (*WrappedPolicyKey) Descriptor() ([]byte, []int) { + return fileDescriptor_metadata_31965d2849cb292a, []int{4} +} +func (m *WrappedPolicyKey) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WrappedPolicyKey.Unmarshal(m, b) +} +func (m *WrappedPolicyKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WrappedPolicyKey.Marshal(b, m, deterministic) +} +func (dst *WrappedPolicyKey) XXX_Merge(src proto.Message) { + xxx_messageInfo_WrappedPolicyKey.Merge(dst, src) +} +func (m *WrappedPolicyKey) XXX_Size() int { + return xxx_messageInfo_WrappedPolicyKey.Size(m) +} +func (m *WrappedPolicyKey) XXX_DiscardUnknown() { + xxx_messageInfo_WrappedPolicyKey.DiscardUnknown(m) +} + +var xxx_messageInfo_WrappedPolicyKey proto.InternalMessageInfo + +func (m *WrappedPolicyKey) GetProtectorDescriptor() string { + if m != nil { + return m.ProtectorDescriptor + } + return "" +} + +func (m *WrappedPolicyKey) GetWrappedKey() *WrappedKeyData { + if m != nil { + return m.WrappedKey + } + return nil +} + +// The associated data for each policy +type PolicyData struct { + KeyDescriptor string `protobuf:"bytes,1,opt,name=key_descriptor,json=keyDescriptor,proto3" json:"key_descriptor,omitempty"` + Options *EncryptionOptions `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` + WrappedPolicyKeys []*WrappedPolicyKey `protobuf:"bytes,3,rep,name=wrapped_policy_keys,json=wrappedPolicyKeys,proto3" json:"wrapped_policy_keys,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PolicyData) Reset() { *m = PolicyData{} } +func (m *PolicyData) 
String() string { return proto.CompactTextString(m) } +func (*PolicyData) ProtoMessage() {} +func (*PolicyData) Descriptor() ([]byte, []int) { + return fileDescriptor_metadata_31965d2849cb292a, []int{5} +} +func (m *PolicyData) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PolicyData.Unmarshal(m, b) +} +func (m *PolicyData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PolicyData.Marshal(b, m, deterministic) +} +func (dst *PolicyData) XXX_Merge(src proto.Message) { + xxx_messageInfo_PolicyData.Merge(dst, src) +} +func (m *PolicyData) XXX_Size() int { + return xxx_messageInfo_PolicyData.Size(m) +} +func (m *PolicyData) XXX_DiscardUnknown() { + xxx_messageInfo_PolicyData.DiscardUnknown(m) +} + +var xxx_messageInfo_PolicyData proto.InternalMessageInfo + +func (m *PolicyData) GetKeyDescriptor() string { + if m != nil { + return m.KeyDescriptor + } + return "" +} + +func (m *PolicyData) GetOptions() *EncryptionOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *PolicyData) GetWrappedPolicyKeys() []*WrappedPolicyKey { + if m != nil { + return m.WrappedPolicyKeys + } + return nil +} + +// Data stored in the config file +type Config struct { + Source SourceType `protobuf:"varint,1,opt,name=source,proto3,enum=metadata.SourceType" json:"source,omitempty"` + HashCosts *HashingCosts `protobuf:"bytes,2,opt,name=hash_costs,json=hashCosts,proto3" json:"hash_costs,omitempty"` + Options *EncryptionOptions `protobuf:"bytes,4,opt,name=options,proto3" json:"options,omitempty"` + UseFsKeyringForV1Policies bool `protobuf:"varint,5,opt,name=use_fs_keyring_for_v1_policies,json=useFsKeyringForV1Policies,proto3" json:"use_fs_keyring_for_v1_policies,omitempty"` + AllowCrossUserMetadata bool `protobuf:"varint,6,opt,name=allow_cross_user_metadata,json=allowCrossUserMetadata,proto3" json:"allow_cross_user_metadata,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Config) Reset() { *m = Config{} } +func (m *Config) String() string { return proto.CompactTextString(m) } +func (*Config) ProtoMessage() {} +func (*Config) Descriptor() ([]byte, []int) { + return fileDescriptor_metadata_31965d2849cb292a, []int{6} +} +func (m *Config) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Config.Unmarshal(m, b) +} +func (m *Config) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Config.Marshal(b, m, deterministic) +} +func (dst *Config) XXX_Merge(src proto.Message) { + xxx_messageInfo_Config.Merge(dst, src) +} +func (m *Config) XXX_Size() int { + return xxx_messageInfo_Config.Size(m) +} +func (m *Config) XXX_DiscardUnknown() { + xxx_messageInfo_Config.DiscardUnknown(m) +} + +var xxx_messageInfo_Config proto.InternalMessageInfo + +func (m *Config) GetSource() SourceType { + if m != nil { + return m.Source + } + return SourceType_default +} + +func (m *Config) GetHashCosts() *HashingCosts { + if m != nil { + return m.HashCosts + } + return nil +} + +func (m *Config) GetOptions() *EncryptionOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *Config) GetUseFsKeyringForV1Policies() bool { + if m != nil { + return m.UseFsKeyringForV1Policies + } + return false +} + +func (m *Config) GetAllowCrossUserMetadata() bool { + if m != nil { + return m.AllowCrossUserMetadata + } + return false +} + +func init() { + proto.RegisterType((*HashingCosts)(nil), "metadata.HashingCosts") + 
proto.RegisterType((*WrappedKeyData)(nil), "metadata.WrappedKeyData") + proto.RegisterType((*ProtectorData)(nil), "metadata.ProtectorData") + proto.RegisterType((*EncryptionOptions)(nil), "metadata.EncryptionOptions") + proto.RegisterType((*WrappedPolicyKey)(nil), "metadata.WrappedPolicyKey") + proto.RegisterType((*PolicyData)(nil), "metadata.PolicyData") + proto.RegisterType((*Config)(nil), "metadata.Config") + proto.RegisterEnum("metadata.SourceType", SourceType_name, SourceType_value) + proto.RegisterEnum("metadata.EncryptionOptions_Mode", EncryptionOptions_Mode_name, EncryptionOptions_Mode_value) +} + +func init() { proto.RegisterFile("metadata/metadata.proto", fileDescriptor_metadata_31965d2849cb292a) } + +var fileDescriptor_metadata_31965d2849cb292a = []byte{ + // 748 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0xdb, 0x6a, 0xf3, 0x46, + 0x10, 0xae, 0x24, 0xc7, 0x87, 0xf1, 0xa1, 0xca, 0xfe, 0x69, 0xaa, 0xb4, 0x50, 0x8c, 0x4b, 0x20, + 0x94, 0x90, 0x62, 0x97, 0x94, 0x06, 0x4a, 0x21, 0x75, 0x92, 0x36, 0x09, 0xa1, 0xe9, 0xda, 0x75, + 0x5b, 0x28, 0x88, 0x8d, 0xb4, 0xb6, 0x17, 0x4b, 0x5a, 0xb1, 0xbb, 0x8a, 0xd1, 0x5d, 0xef, 0xfa, + 0x00, 0x7d, 0x97, 0xf6, 0x65, 0xfa, 0x30, 0x45, 0x2b, 0xc9, 0x87, 0x04, 0x42, 0xf2, 0xdf, 0x98, + 0xd9, 0x6f, 0x67, 0xe6, 0x9b, 0xf9, 0x66, 0xc7, 0x82, 0x8f, 0x43, 0xaa, 0x88, 0x4f, 0x14, 0xf9, + 0xb2, 0x34, 0x4e, 0x62, 0xc1, 0x15, 0x47, 0xf5, 0xf2, 0xdc, 0xfb, 0x03, 0x5a, 0x3f, 0x12, 0x39, + 0x67, 0xd1, 0x6c, 0xc8, 0xa5, 0x92, 0x08, 0x41, 0x45, 0xb1, 0x90, 0x3a, 0x66, 0xd7, 0x38, 0xb2, + 0xb0, 0xb6, 0xd1, 0x3e, 0x54, 0x43, 0x1a, 0x72, 0x91, 0x3a, 0x96, 0x46, 0x8b, 0x13, 0xea, 0x42, + 0x33, 0x26, 0x82, 0x04, 0x01, 0x0d, 0x98, 0x0c, 0x9d, 0x8a, 0xbe, 0xdc, 0x84, 0x7a, 0xbf, 0x43, + 0xe7, 0x57, 0x41, 0xe2, 0x98, 0xfa, 0xb7, 0x34, 0xbd, 0x20, 0x8a, 0xa0, 0x0e, 0x98, 0xd7, 0x13, + 0xc7, 0xe8, 0x1a, 0x47, 0x2d, 0x6c, 0x5e, 0x4f, 0xd0, 0xe7, 0xd0, 0xa6, 0x91, 0x27, 0xd2, 0x58, + 0x51, 0xdf, 0x5d, 0xd0, 0x54, 0x13, 0xb7, 0x70, 0x6b, 0x05, 0xde, 0xd2, 0x34, 0x2b, 0x6a, 0x1e, + 0x12, 0x4f, 0xd3, 0xb7, 0xb0, 0xb6, 0x7b, 0x7f, 0x9b, 0xd0, 0xbe, 0x17, 0x5c, 0x51, 0x4f, 0x71, + 0xa1, 0x53, 0xf7, 0x61, 0x2f, 0x2e, 0x01, 0xd7, 0xa7, 0xd2, 0x13, 0x2c, 0x56, 0x5c, 0x68, 0xb2, + 0x06, 0x7e, 0xb7, 0xba, 0xbb, 0x58, 0x5d, 0xa1, 0x63, 0xa8, 0x4a, 0x9e, 0x08, 0x2f, 0xef, 0xb7, + 0x33, 0xd8, 0x3b, 0x59, 0x09, 0x35, 0xd2, 0xf8, 0x38, 0x8d, 0x29, 0x2e, 0x7c, 0xb2, 0x32, 0x22, + 0x12, 0x52, 0x5d, 0x46, 0x03, 0x6b, 0x1b, 0x1d, 0xc3, 0x8e, 0x97, 0x09, 0xa7, 0xbb, 0x6f, 0x0e, + 0xf6, 0xd7, 0x09, 0x36, 0x65, 0xc5, 0xb9, 0x53, 0x96, 0x41, 0x92, 0x40, 0x39, 0x3b, 0x79, 0x23, + 0x99, 0x8d, 0x6c, 0xb0, 0x12, 0xe6, 0x3b, 0x55, 0xad, 0x5e, 0x66, 0xa2, 0x33, 0x68, 0x2e, 0x73, + 0xd5, 0xb4, 0x22, 0x35, 0x9d, 0xd9, 0x59, 0x67, 0xde, 0x96, 0x14, 0xc3, 0x72, 0x75, 0xee, 0xfd, + 0x67, 0xc2, 0xee, 0x65, 0x2e, 0x1d, 0xe3, 0xd1, 0x4f, 0xfa, 0x57, 0x22, 0x07, 0x6a, 0x31, 0xf1, + 0x7d, 0x16, 0xcd, 0xb4, 0x18, 0x16, 0x2e, 0x8f, 0xe8, 0x5b, 0xa8, 0x7b, 0x3c, 0x52, 0x34, 0x52, + 0xb2, 0x90, 0xa0, 0xbb, 0xe6, 0x79, 0x96, 0xe8, 0xe4, 0x8e, 0xfb, 0x14, 0xaf, 0x22, 0xd0, 0x77, + 0xd0, 0x98, 0xb2, 0x80, 0x66, 0x42, 0x48, 0xad, 0xca, 0x6b, 0xc2, 0xd7, 0x21, 0xe8, 0x10, 0x3a, + 0x31, 0x0f, 0x98, 0x97, 0xba, 0x8f, 0x54, 0x48, 0xc6, 0xa3, 0xe2, 0x0d, 0xb5, 0x73, 0x74, 0x92, + 0x83, 0xbd, 0xbf, 0x0c, 0xa8, 0x64, 0xa1, 0xa8, 0x09, 0x35, 0x9f, 0x4e, 0x49, 0x12, 0x28, 0xfb, + 0x03, 0xf4, 0x21, 0x34, 0xcf, 0x2f, 0x47, 0xee, 0xe0, 0xf4, 0x6b, 0xf7, 0xb7, 
0xf1, 0xc8, 0x36, + 0x36, 0x81, 0x1f, 0x86, 0x77, 0xb6, 0xb9, 0x09, 0x0c, 0xbf, 0x1f, 0xda, 0xd6, 0x16, 0x30, 0x1e, + 0xd9, 0x95, 0x12, 0xe8, 0x0f, 0xbe, 0xd1, 0x1e, 0x3b, 0x5b, 0xc0, 0x78, 0x64, 0x57, 0x51, 0x0b, + 0xea, 0xe7, 0x3e, 0x23, 0x91, 0x4a, 0x42, 0xbb, 0xd1, 0xfb, 0xd3, 0x00, 0xbb, 0x50, 0xff, 0x5e, + 0x97, 0x98, 0xbd, 0xce, 0xf7, 0x78, 0x77, 0x4f, 0x26, 0x6c, 0xbe, 0x61, 0xc2, 0xff, 0x18, 0x00, + 0x39, 0xb7, 0x7e, 0xf4, 0x87, 0xd0, 0x59, 0xd0, 0xf4, 0x39, 0x6d, 0x7b, 0x41, 0xd3, 0x0d, 0xc2, + 0x53, 0xa8, 0xf1, 0x7c, 0x08, 0x05, 0xd9, 0xa7, 0x2f, 0xcc, 0x09, 0x97, 0xbe, 0xe8, 0x06, 0xde, + 0x95, 0x75, 0x16, 0x83, 0x5a, 0xd0, 0x34, 0x1b, 0xb5, 0x75, 0xd4, 0x1c, 0x7c, 0xf2, 0xac, 0xde, + 0x95, 0x26, 0x78, 0x77, 0xf9, 0x04, 0x91, 0xbd, 0x7f, 0x4d, 0xa8, 0x0e, 0x79, 0x34, 0x65, 0xb3, + 0x8d, 0xb5, 0x33, 0x5e, 0xb1, 0x76, 0xa7, 0x00, 0x73, 0x22, 0xe7, 0x6e, 0xbe, 0x67, 0xe6, 0x8b, + 0x7b, 0xd6, 0xc8, 0x3c, 0xf3, 0x7f, 0xb2, 0x8d, 0x96, 0x2b, 0x6f, 0x68, 0xf9, 0x1c, 0x3e, 0x4b, + 0x24, 0x75, 0xa7, 0x32, 0x6b, 0x55, 0xb0, 0x68, 0xe6, 0x4e, 0xb9, 0x70, 0x1f, 0xfb, 0xb9, 0x00, + 0x8c, 0x4a, 0xbd, 0xbc, 0x75, 0x7c, 0x90, 0x48, 0x7a, 0x25, 0x6f, 0x73, 0x9f, 0x2b, 0x2e, 0x26, + 0xfd, 0xfb, 0xc2, 0x01, 0x9d, 0xc1, 0x01, 0x09, 0x02, 0xbe, 0x74, 0x3d, 0xc1, 0xa5, 0x74, 0x13, + 0x49, 0x85, 0x5b, 0x52, 0xeb, 0x3d, 0xaf, 0xe3, 0x7d, 0xed, 0x30, 0xcc, 0xee, 0x7f, 0x91, 0x54, + 0xdc, 0x15, 0xb7, 0x37, 0x95, 0xba, 0x65, 0x57, 0x70, 0xdb, 0xe3, 0x61, 0x4c, 0x14, 0x7b, 0x60, + 0x01, 0x53, 0xe9, 0x17, 0x3f, 0x03, 0xac, 0x65, 0xd9, 0x5e, 0x02, 0x04, 0x9d, 0x98, 0x84, 0x6e, + 0x4c, 0xa4, 0x8c, 0xe7, 0x82, 0x48, 0x6a, 0x1b, 0xe8, 0x23, 0xd8, 0xf5, 0x12, 0xa9, 0xf8, 0x16, + 0x6c, 0x66, 0x71, 0x82, 0x2c, 0xb3, 0xae, 0x6c, 0xeb, 0xa1, 0xaa, 0xbf, 0x03, 0x5f, 0xfd, 0x1f, + 0x00, 0x00, 0xff, 0xff, 0xe2, 0x78, 0x9e, 0x2e, 0x22, 0x06, 0x00, 0x00, +} diff --git a/vendor/github.com/google/fscrypt/metadata/metadata.proto b/vendor/github.com/google/fscrypt/metadata/metadata.proto new file mode 100644 index 00000000000..84245e020c6 --- /dev/null +++ b/vendor/github.com/google/fscrypt/metadata/metadata.proto @@ -0,0 +1,107 @@ +/* + * metadata.proto - File which contains all of the metadata structures which we + * write to metadata files. Must be compiled with protoc to use the library. + * Compilation can be invoked with go generate. + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +// If you modify this file, be sure to run "go generate" on this package. +syntax = "proto3"; +package metadata; + +// Cost parameters to be used in our hashing functions. +message HashingCosts { + int64 time = 2; + int64 memory = 3; + int64 parallelism = 4; +} + +// This structure is used for our authenticated wrapping/unwrapping of keys. 
+message WrappedKeyData { + bytes IV = 1; + bytes encrypted_key = 2; + bytes hmac = 3; +} + +// Specifies the method in which an outside secret is obtained for a Protector +enum SourceType { + default = 0; + pam_passphrase = 1; + custom_passphrase = 2; + raw_key = 3; +} + +// The associated data for each protector +message ProtectorData { + string protector_descriptor = 1; + SourceType source = 2; + + // These are only used by some of the protector types + string name = 3; + HashingCosts costs = 4; + bytes salt = 5; + int64 uid = 6; + + WrappedKeyData wrapped_key = 7; +} + +// Encryption policy specifics, corresponds to the fscrypt_policy struct +message EncryptionOptions { + int64 padding = 1; + + // Type of encryption; should match declarations of unix.FSCRYPT_MODE + enum Mode { + default = 0; + AES_256_XTS = 1; + AES_256_GCM = 2; + AES_256_CBC = 3; + AES_256_CTS = 4; + AES_128_CBC = 5; + AES_128_CTS = 6; + Adiantum = 9; + } + + Mode contents = 2; + Mode filenames = 3; + + int64 policy_version = 4; +} + +message WrappedPolicyKey { + string protector_descriptor = 1; + WrappedKeyData wrapped_key = 2; +} + +// The associated data for each policy +message PolicyData { + string key_descriptor = 1; + EncryptionOptions options = 2; + repeated WrappedPolicyKey wrapped_policy_keys = 3; +} + +// Data stored in the config file +message Config { + SourceType source = 1; + HashingCosts hash_costs = 2; + EncryptionOptions options = 4; + bool use_fs_keyring_for_v1_policies = 5; + bool allow_cross_user_metadata = 6; + + // reserve the removed field 'string compatibility = 3;' + reserved 3; + reserved "compatibility"; +} diff --git a/vendor/github.com/google/fscrypt/metadata/policy.go b/vendor/github.com/google/fscrypt/metadata/policy.go new file mode 100644 index 00000000000..e218a0814d7 --- /dev/null +++ b/vendor/github.com/google/fscrypt/metadata/policy.go @@ -0,0 +1,348 @@ +/* + * policy.go - Functions for getting and setting policies on a specified + * directory or file. + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package metadata + +import ( + "encoding/hex" + "fmt" + "log" + "math" + "os" + "os/user" + "strconv" + "unsafe" + + "github.com/pkg/errors" + "golang.org/x/sys/unix" + + "github.com/google/fscrypt/util" +) + +var ( + // ErrEncryptionNotSupported indicates that encryption is not supported + // on the given filesystem, and there is no way to enable it. + ErrEncryptionNotSupported = errors.New("encryption not supported") + + // ErrEncryptionNotEnabled indicates that encryption is not supported on + // the given filesystem, but there is a way to enable it. + ErrEncryptionNotEnabled = errors.New("encryption not enabled") +) + +// ErrAlreadyEncrypted indicates that the path is already encrypted. 
+type ErrAlreadyEncrypted struct { + Path string +} + +func (err *ErrAlreadyEncrypted) Error() string { + return fmt.Sprintf("file or directory %q is already encrypted", err.Path) +} + +// ErrBadEncryptionOptions indicates that unsupported encryption options were given. +type ErrBadEncryptionOptions struct { + Path string + Options *EncryptionOptions +} + +func (err *ErrBadEncryptionOptions) Error() string { + return fmt.Sprintf(`cannot encrypt %q because the kernel doesn't support the requested encryption options. + + The options are %s`, err.Path, err.Options) +} + +// ErrDirectoryNotOwned indicates a directory can't be encrypted because it's +// owned by another user. +type ErrDirectoryNotOwned struct { + Path string + Owner uint32 +} + +func (err *ErrDirectoryNotOwned) Error() string { + owner := strconv.Itoa(int(err.Owner)) + if u, e := user.LookupId(owner); e == nil && u.Username != "" { + owner = u.Username + } + return fmt.Sprintf(`cannot encrypt %q because it's owned by another user (%s). + + Encryption can only be enabled on a directory you own, even if you have + write access to the directory.`, err.Path, owner) +} + +// ErrNotEncrypted indicates that the path is not encrypted. +type ErrNotEncrypted struct { + Path string +} + +func (err *ErrNotEncrypted) Error() string { + return fmt.Sprintf("file or directory %q is not encrypted", err.Path) +} + +func policyIoctl(file *os.File, request uintptr, arg unsafe.Pointer) error { + _, _, errno := unix.Syscall(unix.SYS_IOCTL, file.Fd(), request, uintptr(arg)) + if errno == 0 { + return nil + } + return errno +} + +// Maps EncryptionOptions.Padding <-> FSCRYPT_POLICY_FLAGS +var ( + paddingArray = []int64{4, 8, 16, 32} + flagsArray = []int64{unix.FSCRYPT_POLICY_FLAGS_PAD_4, unix.FSCRYPT_POLICY_FLAGS_PAD_8, + unix.FSCRYPT_POLICY_FLAGS_PAD_16, unix.FSCRYPT_POLICY_FLAGS_PAD_32} +) + +// flagsToPadding returns the amount of padding specified in the policy flags. +func flagsToPadding(flags uint8) int64 { + paddingFlag := int64(flags & unix.FS_POLICY_FLAGS_PAD_MASK) + + // This lookup should always succeed + padding, ok := util.Lookup(paddingFlag, flagsArray, paddingArray) + if !ok { + log.Panicf("padding flag of %x not found", paddingFlag) + } + return padding +} + +func buildV1PolicyData(policy *unix.FscryptPolicyV1) *PolicyData { + return &PolicyData{ + KeyDescriptor: hex.EncodeToString(policy.Master_key_descriptor[:]), + Options: &EncryptionOptions{ + Padding: flagsToPadding(policy.Flags), + Contents: EncryptionOptions_Mode(policy.Contents_encryption_mode), + Filenames: EncryptionOptions_Mode(policy.Filenames_encryption_mode), + PolicyVersion: 1, + }, + } +} + +func buildV2PolicyData(policy *unix.FscryptPolicyV2) *PolicyData { + return &PolicyData{ + KeyDescriptor: hex.EncodeToString(policy.Master_key_identifier[:]), + Options: &EncryptionOptions{ + Padding: flagsToPadding(policy.Flags), + Contents: EncryptionOptions_Mode(policy.Contents_encryption_mode), + Filenames: EncryptionOptions_Mode(policy.Filenames_encryption_mode), + PolicyVersion: 2, + }, + } +} + +// GetPolicy returns the Policy data for the given directory or file (includes +// the KeyDescriptor and the encryption options). Returns an error if the +// path is not encrypted or the policy couldn't be retrieved. +func GetPolicy(path string) (*PolicyData, error) { + file, err := os.Open(path) + if err != nil { + return nil, err + } + defer file.Close() + + // First try the new version of the ioctl. This works for both v1 and v2 policies. 
+ var arg unix.FscryptGetPolicyExArg + arg.Size = uint64(unsafe.Sizeof(arg.Policy)) + policyPtr := util.Ptr(arg.Policy[:]) + err = policyIoctl(file, unix.FS_IOC_GET_ENCRYPTION_POLICY_EX, unsafe.Pointer(&arg)) + if err == unix.ENOTTY { + // Fall back to the old version of the ioctl. This works for v1 policies only. + err = policyIoctl(file, unix.FS_IOC_GET_ENCRYPTION_POLICY, policyPtr) + arg.Size = uint64(unsafe.Sizeof(unix.FscryptPolicyV1{})) + } + switch err { + case nil: + break + case unix.ENOTTY: + return nil, ErrEncryptionNotSupported + case unix.EOPNOTSUPP: + return nil, ErrEncryptionNotEnabled + case unix.ENODATA, unix.ENOENT: + // ENOENT was returned instead of ENODATA on some filesystems before v4.11. + return nil, &ErrNotEncrypted{path} + default: + return nil, errors.Wrapf(err, "failed to get encryption policy of %q", path) + } + switch arg.Policy[0] { // arg.policy.version + case unix.FSCRYPT_POLICY_V1: + if arg.Size != uint64(unsafe.Sizeof(unix.FscryptPolicyV1{})) { + // should never happen + return nil, errors.New("unexpected size for v1 policy") + } + return buildV1PolicyData((*unix.FscryptPolicyV1)(policyPtr)), nil + case unix.FSCRYPT_POLICY_V2: + if arg.Size != uint64(unsafe.Sizeof(unix.FscryptPolicyV2{})) { + // should never happen + return nil, errors.New("unexpected size for v2 policy") + } + return buildV2PolicyData((*unix.FscryptPolicyV2)(policyPtr)), nil + default: + return nil, errors.Errorf("unsupported encryption policy version [%d]", + arg.Policy[0]) + } +} + +// For improved performance, use the DIRECT_KEY flag when using ciphers that +// support it, e.g. Adiantum. It is safe because fscrypt won't reuse the key +// for any other policy. (Multiple directories with same policy are okay.) +func shouldUseDirectKeyFlag(options *EncryptionOptions) bool { + // Contents and filenames encryption modes must be the same + if options.Contents != options.Filenames { + return false + } + // Currently only Adiantum supports DIRECT_KEY. 
+ return options.Contents == EncryptionOptions_Adiantum +} + +func buildPolicyFlags(options *EncryptionOptions) uint8 { + // This lookup should always succeed (as policy is valid) + flags, ok := util.Lookup(options.Padding, paddingArray, flagsArray) + if !ok { + log.Panicf("padding of %d was not found", options.Padding) + } + if shouldUseDirectKeyFlag(options) { + flags |= unix.FSCRYPT_POLICY_FLAG_DIRECT_KEY + } + return uint8(flags) +} + +func setV1Policy(file *os.File, options *EncryptionOptions, descriptorBytes []byte) error { + policy := unix.FscryptPolicyV1{ + Version: unix.FSCRYPT_POLICY_V1, + Contents_encryption_mode: uint8(options.Contents), + Filenames_encryption_mode: uint8(options.Filenames), + Flags: uint8(buildPolicyFlags(options)), + } + + // The descriptor should always be the correct length (as policy is valid) + if len(descriptorBytes) != unix.FSCRYPT_KEY_DESCRIPTOR_SIZE { + log.Panic("wrong descriptor size for v1 policy") + } + copy(policy.Master_key_descriptor[:], descriptorBytes) + + return policyIoctl(file, unix.FS_IOC_SET_ENCRYPTION_POLICY, unsafe.Pointer(&policy)) +} + +func setV2Policy(file *os.File, options *EncryptionOptions, descriptorBytes []byte) error { + policy := unix.FscryptPolicyV2{ + Version: unix.FSCRYPT_POLICY_V2, + Contents_encryption_mode: uint8(options.Contents), + Filenames_encryption_mode: uint8(options.Filenames), + Flags: uint8(buildPolicyFlags(options)), + } + + // The descriptor should always be the correct length (as policy is valid) + if len(descriptorBytes) != unix.FSCRYPT_KEY_IDENTIFIER_SIZE { + log.Panic("wrong descriptor size for v2 policy") + } + copy(policy.Master_key_identifier[:], descriptorBytes) + + return policyIoctl(file, unix.FS_IOC_SET_ENCRYPTION_POLICY, unsafe.Pointer(&policy)) +} + +// SetPolicy sets up the specified directory to be encrypted with the specified +// policy. Returns an error if we cannot set the policy for any reason (not a +// directory, invalid options or KeyDescriptor, etc). +func SetPolicy(path string, data *PolicyData) error { + file, err := os.Open(path) + if err != nil { + return err + } + defer file.Close() + + if err = data.CheckValidity(); err != nil { + return errors.Wrap(err, "invalid policy") + } + + descriptorBytes, err := hex.DecodeString(data.KeyDescriptor) + if err != nil { + return errors.New("invalid key descriptor: " + data.KeyDescriptor) + } + + switch data.Options.PolicyVersion { + case 1: + err = setV1Policy(file, data.Options, descriptorBytes) + case 2: + err = setV2Policy(file, data.Options, descriptorBytes) + default: + err = errors.Errorf("policy version of %d is invalid", data.Options.PolicyVersion) + } + if err == unix.EINVAL { + // Before kernel v4.11, many different errors all caused unix.EINVAL to be returned. + // We try to disambiguate this error here. This disambiguation will not always give + // the correct error due to a potential race condition on path. 
+ if info, statErr := os.Stat(path); statErr != nil || !info.IsDir() { + // Checking if the path is not a directory + err = unix.ENOTDIR + } else if _, policyErr := GetPolicy(path); policyErr == nil { + // Checking if a policy is already set on this directory + err = unix.EEXIST + } + } + switch err { + case nil: + return nil + case unix.EACCES: + var stat unix.Stat_t + if statErr := unix.Stat(path, &stat); statErr == nil && stat.Uid != uint32(os.Geteuid()) { + return &ErrDirectoryNotOwned{path, stat.Uid} + } + case unix.EEXIST: + return &ErrAlreadyEncrypted{path} + case unix.EINVAL: + return &ErrBadEncryptionOptions{path, data.Options} + case unix.ENOTTY: + return ErrEncryptionNotSupported + case unix.EOPNOTSUPP: + return ErrEncryptionNotEnabled + } + return errors.Wrapf(err, "failed to set encryption policy on %q", path) +} + +// CheckSupport returns an error if the filesystem containing path does not +// support filesystem encryption. This can be for many reasons including an +// incompatible kernel or filesystem or not enabling the right feature flags. +func CheckSupport(path string) error { + file, err := os.Open(path) + if err != nil { + return err + } + defer file.Close() + + // On supported directories, giving a bad policy will return EINVAL + badPolicy := unix.FscryptPolicyV1{ + Version: math.MaxUint8, + Contents_encryption_mode: math.MaxUint8, + Filenames_encryption_mode: math.MaxUint8, + Flags: math.MaxUint8, + } + + err = policyIoctl(file, unix.FS_IOC_SET_ENCRYPTION_POLICY, unsafe.Pointer(&badPolicy)) + switch err { + case nil: + log.Panicf(`FS_IOC_SET_ENCRYPTION_POLICY succeeded when it should have failed. + Please open an issue, filesystem %q may be corrupted.`, path) + case unix.EINVAL, unix.EACCES: + return nil + case unix.ENOTTY: + return ErrEncryptionNotSupported + case unix.EOPNOTSUPP: + return ErrEncryptionNotEnabled + } + return errors.Wrapf(err, "unexpected error checking for encryption support on filesystem %q", path) +} diff --git a/vendor/github.com/google/fscrypt/security/cache.go b/vendor/github.com/google/fscrypt/security/cache.go new file mode 100644 index 00000000000..f11248d2bd1 --- /dev/null +++ b/vendor/github.com/google/fscrypt/security/cache.go @@ -0,0 +1,49 @@ +/* + * cache.go - Handles cache clearing and management. + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package security + +import ( + "log" + "os" + + "golang.org/x/sys/unix" +) + +// DropFilesystemCache instructs the kernel to free the reclaimable inodes and +// dentries. This has the effect of making encrypted directories whose keys are +// not present no longer accessible. Requires root privileges. +func DropFilesystemCache() error { + // Dirty reclaimable inodes must be synced so that they will be freed. 
+ log.Print("syncing changes to filesystem")
+ unix.Sync()
+
+ // See: https://www.kernel.org/doc/Documentation/sysctl/vm.txt
+ log.Print("freeing reclaimable inodes and dentries")
+ file, err := os.OpenFile("/proc/sys/vm/drop_caches", os.O_WRONLY|os.O_SYNC, 0)
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+ // "2" just frees the reclaimable inodes and dentries. The associated
+ // pages to these inodes will be freed. We do not need to free the
+ // entire pagecache (as this will severely impact performance).
+ _, err = file.WriteString("2")
+ return err
+}
diff --git a/vendor/github.com/google/fscrypt/security/privileges.go b/vendor/github.com/google/fscrypt/security/privileges.go
new file mode 100644
index 00000000000..5bdd43c5d9a
--- /dev/null
+++ b/vendor/github.com/google/fscrypt/security/privileges.go
@@ -0,0 +1,156 @@
+/*
+ * privileges.go - Functions for managing users and privileges.
+ *
+ * Copyright 2017 Google Inc.
+ * Author: Joe Richey (joerichey@google.com)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+// Package security manages:
+// - Cache clearing (cache.go)
+// - Privilege manipulation (privileges.go)
+package security
+
+// Use the libc versions of setreuid, setregid, and setgroups instead of the
+// "sys/unix" versions. The "sys/unix" versions use the raw syscalls which
+// operate on the calling thread only, whereas the libc versions operate on the
+// whole process. And we need to operate on the whole process, firstly for
+// pam_fscrypt to prevent the privileges of Go worker threads from diverging
+// from the PAM stack's "main" thread, violating libc's assumption and causing
+// an abort() later in the PAM stack; and secondly because Go code may migrate
+// between OS-level threads while it's running.
+//
+// See also: https://github.com/golang/go/issues/1435
+
+/*
+#define _GNU_SOURCE // for getresuid and setresuid
+#include <sys/types.h>
+#include <unistd.h> // getting and setting uids and gids
+#include <grp.h> // setgroups
+*/
+import "C"
+
+import (
+ "log"
+ "os/user"
+ "syscall"
+
+ "github.com/pkg/errors"
+
+ "github.com/google/fscrypt/util"
+)
+
+// Privileges encapsulate the effective uid/gid and groups of a process.
+type Privileges struct {
+ euid C.uid_t
+ egid C.gid_t
+ groups []C.gid_t
+}
+
+// ProcessPrivileges returns the process's current effective privileges.
+func ProcessPrivileges() (*Privileges, error) {
+ ruid := C.getuid()
+ euid := C.geteuid()
+ rgid := C.getgid()
+ egid := C.getegid()
+
+ var groups []C.gid_t
+ n, err := C.getgroups(0, nil)
+ if n < 0 {
+ return nil, err
+ }
+ // If n == 0, the user isn't in any groups, so groups == nil is fine.
+ if n > 0 {
+ groups = make([]C.gid_t, n)
+ n, err = C.getgroups(n, &groups[0])
+ if n < 0 {
+ return nil, err
+ }
+ groups = groups[:n]
+ }
+ log.Printf("Current privs (real, effective): uid=(%d,%d) gid=(%d,%d) groups=%v",
+ ruid, euid, rgid, egid, groups)
+ return &Privileges{euid, egid, groups}, nil
+}
+
+// UserPrivileges returns the default privileges for the specified user.
+func UserPrivileges(user *user.User) (*Privileges, error) { + privs := &Privileges{ + euid: C.uid_t(util.AtoiOrPanic(user.Uid)), + egid: C.gid_t(util.AtoiOrPanic(user.Gid)), + } + userGroups, err := user.GroupIds() + if err != nil { + return nil, util.SystemError(err.Error()) + } + privs.groups = make([]C.gid_t, len(userGroups)) + for i, group := range userGroups { + privs.groups[i] = C.gid_t(util.AtoiOrPanic(group)) + } + return privs, nil +} + +// SetProcessPrivileges sets the privileges of the current process to have those +// specified by privs. The original privileges can be obtained by first saving +// the output of ProcessPrivileges, calling SetProcessPrivileges with the +// desired privs, then calling SetProcessPrivileges with the saved privs. +func SetProcessPrivileges(privs *Privileges) error { + log.Printf("Setting euid=%d egid=%d groups=%v", privs.euid, privs.egid, privs.groups) + + // If setting privs as root, we need to set the euid to 0 first, so that + // we will have the necessary permissions to make the other changes to + // the groups/egid/euid, regardless of our original euid. + C.seteuid(0) + + // Separately handle the case where the user is in no groups. + numGroups := C.size_t(len(privs.groups)) + groupsPtr := (*C.gid_t)(nil) + if numGroups > 0 { + groupsPtr = &privs.groups[0] + } + + if res, err := C.setgroups(numGroups, groupsPtr); res < 0 { + return errors.Wrapf(err.(syscall.Errno), "setting groups") + } + if res, err := C.setegid(privs.egid); res < 0 { + return errors.Wrapf(err.(syscall.Errno), "setting egid") + } + if res, err := C.seteuid(privs.euid); res < 0 { + return errors.Wrapf(err.(syscall.Errno), "setting euid") + } + ProcessPrivileges() + return nil +} + +// SetUids sets the process's real, effective, and saved UIDs. +func SetUids(ruid, euid, suid int) error { + log.Printf("Setting ruid=%d euid=%d suid=%d", ruid, euid, suid) + // We elevate all the privs before setting them. This prevents issues + // with (ruid=1000,euid=1000,suid=0), where just a single call to + // setresuid might fail with permission denied. + if res, err := C.setresuid(0, 0, 0); res < 0 { + return errors.Wrapf(err.(syscall.Errno), "setting uids") + } + if res, err := C.setresuid(C.uid_t(ruid), C.uid_t(euid), C.uid_t(suid)); res < 0 { + return errors.Wrapf(err.(syscall.Errno), "setting uids") + } + return nil +} + +// GetUids gets the process's real, effective, and saved UIDs. +func GetUids() (int, int, int) { + var ruid, euid, suid C.uid_t + C.getresuid(&ruid, &euid, &suid) + return int(ruid), int(euid), int(suid) +} diff --git a/vendor/github.com/google/fscrypt/util/errors.go b/vendor/github.com/google/fscrypt/util/errors.go new file mode 100644 index 00000000000..3c87a2c4c85 --- /dev/null +++ b/vendor/github.com/google/fscrypt/util/errors.go @@ -0,0 +1,135 @@ +/* + * errors.go - Custom errors and error functions used by fscrypt + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package util + +import ( + "fmt" + "io" + "log" + "os" + + "github.com/pkg/errors" +) + +// ErrReader wraps an io.Reader, passing along calls to Read() until a read +// fails. Then, the error is stored, and all subsequent calls to Read() do +// nothing. This allows you to write code which has many subsequent reads and +// do all of the error checking at the end. For example: +// +// r := NewErrReader(reader) +// r.Read(foo) +// r.Read(bar) +// r.Read(baz) +// if r.Err() != nil { +// // Handle error +// } +// +// Taken from https://blog.golang.org/errors-are-values by Rob Pike. +type ErrReader struct { + r io.Reader + err error +} + +// NewErrReader creates an ErrReader which wraps the provided reader. +func NewErrReader(reader io.Reader) *ErrReader { + return &ErrReader{r: reader, err: nil} +} + +// Read runs ReadFull on the wrapped reader if no errors have occurred. +// Otherwise, the previous error is just returned and no reads are attempted. +func (e *ErrReader) Read(p []byte) (n int, err error) { + if e.err == nil { + n, e.err = io.ReadFull(e.r, p) + } + return n, e.err +} + +// Err returns the first encountered err (or nil if no errors occurred). +func (e *ErrReader) Err() error { + return e.err +} + +// ErrWriter works exactly like ErrReader, except with io.Writer. +type ErrWriter struct { + w io.Writer + err error +} + +// NewErrWriter creates an ErrWriter which wraps the provided writer. +func NewErrWriter(writer io.Writer) *ErrWriter { + return &ErrWriter{w: writer, err: nil} +} + +// Write runs the wrapped writer's Write if no errors have occurred. Otherwise, +// the previous error is just returned and no writes are attempted. +func (e *ErrWriter) Write(p []byte) (n int, err error) { + if e.err == nil { + n, e.err = e.w.Write(p) + } + return n, e.err +} + +// Err returns the first encountered err (or nil if no errors occurred). +func (e *ErrWriter) Err() error { + return e.err +} + +// CheckValidLength returns an invalid length error if expected != actual +func CheckValidLength(expected, actual int) error { + if expected == actual { + return nil + } + return fmt.Errorf("expected length of %d, got %d", expected, actual) +} + +// SystemError is an error that should indicate something has gone wrong in the +// underlying system (syscall failure, bad ioctl, etc...). +type SystemError string + +func (s SystemError) Error() string { + return "system error: " + string(s) +} + +// NeverError panics if a non-nil error is passed in. It should be used to check +// for logic errors, not to handle recoverable errors. +func NeverError(err error) { + if err != nil { + log.Panicf("NeverError() check failed: %v", err) + } +} + +var ( + // testEnvVarName is the name of an environment variable that should be + // set to an empty mountpoint. This is only used for integration tests. + // If not set, integration tests are skipped. + testEnvVarName = "TEST_FILESYSTEM_ROOT" + // ErrSkipIntegration indicates integration tests shouldn't be run. + ErrSkipIntegration = errors.New("skipping integration test") +) + +// TestRoot returns a the root of a filesystem specified by testEnvVarName. This +// function is only used for integration tests. 
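+//
+// For example, an integration run might look like (the mountpoint path is
+// only a placeholder):
+//
+//	TEST_FILESYSTEM_ROOT=/mnt/fscrypt-test go test ./...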
+func TestRoot() (string, error) { + path := os.Getenv(testEnvVarName) + if path == "" { + return "", ErrSkipIntegration + } + return path, nil +} diff --git a/vendor/github.com/google/fscrypt/util/util.go b/vendor/github.com/google/fscrypt/util/util.go new file mode 100644 index 00000000000..1dab335b8e4 --- /dev/null +++ b/vendor/github.com/google/fscrypt/util/util.go @@ -0,0 +1,163 @@ +/* + * util.go - Various helpers used throughout fscrypt + * + * Copyright 2017 Google Inc. + * Author: Joe Richey (joerichey@google.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +// Package util contains useful components for simplifying Go code. +// +// The package contains common error types (errors.go) and functions for +// converting arrays to pointers. +package util + +import ( + "bufio" + "fmt" + "log" + "os" + "os/user" + "strconv" + "unsafe" + + "golang.org/x/sys/unix" +) + +// Ptr converts a Go byte array to a pointer to the start of the array. +func Ptr(slice []byte) unsafe.Pointer { + if len(slice) == 0 { + return nil + } + return unsafe.Pointer(&slice[0]) +} + +// ByteSlice takes a pointer to some data and views it as a slice of bytes. +// Note, indexing into this slice is unsafe. +func ByteSlice(ptr unsafe.Pointer) []byte { + // Slice must fit in the smallest address space go supports. + return (*[1 << 30]byte)(ptr)[:] +} + +// PointerSlice takes a pointer to an array of pointers and views it as a slice +// of pointers. Note, indexing into this slice is unsafe. +func PointerSlice(ptr unsafe.Pointer) []unsafe.Pointer { + // Slice must fit in the smallest address space go supports. + return (*[1 << 28]unsafe.Pointer)(ptr)[:] +} + +// Index returns the first index i such that inVal == inArray[i]. +// ok is true if we find a match, false otherwise. +func Index(inVal int64, inArray []int64) (index int, ok bool) { + for index, val := range inArray { + if val == inVal { + return index, true + } + } + return 0, false +} + +// Lookup finds inVal in inArray and returns the corresponding element in +// outArray. Specifically, if inVal == inArray[i], outVal == outArray[i]. +// ok is true if we find a match, false otherwise. +func Lookup(inVal int64, inArray, outArray []int64) (outVal int64, ok bool) { + index, ok := Index(inVal, inArray) + if !ok { + return 0, false + } + return outArray[index], true +} + +// MinInt returns the lesser of a and b. +func MinInt(a, b int) int { + if a < b { + return a + } + return b +} + +// MaxInt returns the greater of a and b. +func MaxInt(a, b int) int { + if a > b { + return a + } + return b +} + +// MinInt64 returns the lesser of a and b. +func MinInt64(a, b int64) int64 { + if a < b { + return a + } + return b +} + +// ReadLine returns a line of input from standard input. An empty string is +// returned if the user didn't insert anything or on error. +func ReadLine() (string, error) { + scanner := bufio.NewScanner(os.Stdin) + scanner.Scan() + return scanner.Text(), scanner.Err() +} + +// AtoiOrPanic converts a string to an int or it panics. 
Should only be used in +// situations where the input MUST be a decimal number. +func AtoiOrPanic(input string) int { + i, err := strconv.Atoi(input) + if err != nil { + panic(err) + } + return i +} + +// UserFromUID returns the User corresponding to the given user id. +func UserFromUID(uid int64) (*user.User, error) { + return user.LookupId(strconv.FormatInt(uid, 10)) +} + +// EffectiveUser returns the user entry corresponding to the effective user. +func EffectiveUser() (*user.User, error) { + return UserFromUID(int64(os.Geteuid())) +} + +// IsUserRoot checks if the effective user is root. +func IsUserRoot() bool { + return os.Geteuid() == 0 +} + +// Chown changes the owner of a File to a User. +func Chown(file *os.File, user *user.User) error { + uid := AtoiOrPanic(user.Uid) + gid := AtoiOrPanic(user.Gid) + return file.Chown(uid, gid) +} + +// IsKernelVersionAtLeast returns true if the Linux kernel version is at least +// major.minor. If something goes wrong it assumes false. +func IsKernelVersionAtLeast(major, minor int) bool { + var uname unix.Utsname + if err := unix.Uname(&uname); err != nil { + log.Printf("Uname failed [%v], assuming old kernel", err) + return false + } + release := string(uname.Release[:]) + log.Printf("Kernel version is %s", release) + var actualMajor, actualMinor int + if n, _ := fmt.Sscanf(release, "%d.%d", &actualMajor, &actualMinor); n != 2 { + log.Printf("Unrecognized uname format %q, assuming old kernel", release) + return false + } + return actualMajor > major || + (actualMajor == major && actualMinor >= minor) +} diff --git a/vendor/github.com/pkg/xattr/.gitignore b/vendor/github.com/pkg/xattr/.gitignore new file mode 100644 index 00000000000..d8b32652e5a --- /dev/null +++ b/vendor/github.com/pkg/xattr/.gitignore @@ -0,0 +1,26 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +.DS_Store + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test + +*.swp diff --git a/vendor/github.com/pkg/xattr/LICENSE b/vendor/github.com/pkg/xattr/LICENSE new file mode 100644 index 00000000000..99d2e9dc8ff --- /dev/null +++ b/vendor/github.com/pkg/xattr/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2012 Dave Cheney. All rights reserved. +Copyright (c) 2014 Kuba Podgórski. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pkg/xattr/README.md b/vendor/github.com/pkg/xattr/README.md new file mode 100644 index 00000000000..0662c0208c5 --- /dev/null +++ b/vendor/github.com/pkg/xattr/README.md @@ -0,0 +1,45 @@ +[![GoDoc](https://godoc.org/github.com/pkg/xattr?status.svg)](http://godoc.org/github.com/pkg/xattr) +[![Go Report Card](https://goreportcard.com/badge/github.com/pkg/xattr)](https://goreportcard.com/report/github.com/pkg/xattr) +[![Build Status](https://github.com/pkg/xattr/workflows/build/badge.svg)](https://github.com/pkg/xattr/actions?query=workflow%3Abuild) +[![Codecov](https://codecov.io/gh/pkg/xattr/branch/master/graph/badge.svg)](https://codecov.io/gh/pkg/xattr) + +xattr +===== +Extended attribute support for Go (linux + darwin + freebsd + netbsd + solaris). + +"Extended attributes are name:value pairs associated permanently with files and directories, similar to the environment strings associated with a process. An attribute may be defined or undefined. If it is defined, its value may be empty or non-empty." [See more...](https://en.wikipedia.org/wiki/Extended_file_attributes) + +`SetWithFlags` allows to additionally pass system flags to be forwarded to the underlying calls. FreeBSD and NetBSD do not support this and the parameter will be ignored. + +The `L` variants of all functions (`LGet/LSet/...`) are identical to `Get/Set/...` except that they +do not reference a symlink that appears at the end of a path. See +[GoDoc](http://godoc.org/github.com/pkg/xattr) for details. + +### Example +```go + const path = "/tmp/myfile" + const prefix = "user." + + if err := xattr.Set(path, prefix+"test", []byte("test-attr-value")); err != nil { + log.Fatal(err) + } + + var list []string + if list, err = xattr.List(path); err != nil { + log.Fatal(err) + } + + var data []byte + if data, err = xattr.Get(path, prefix+"test"); err != nil { + log.Fatal(err) + } + + if err = xattr.Remove(path, prefix+"test"); err != nil { + log.Fatal(err) + } + + // One can also specify the flags parameter to be passed to the OS. + if err := xattr.SetWithFlags(path, prefix+"test", []byte("test-attr-value"), xattr.XATTR_CREATE); err != nil { + log.Fatal(err) + } +``` diff --git a/vendor/github.com/pkg/xattr/xattr.go b/vendor/github.com/pkg/xattr/xattr.go new file mode 100644 index 00000000000..f982da30461 --- /dev/null +++ b/vendor/github.com/pkg/xattr/xattr.go @@ -0,0 +1,255 @@ +/* +Package xattr provides support for extended attributes on linux, darwin and freebsd. +Extended attributes are name:value pairs associated permanently with files and directories, +similar to the environment strings associated with a process. +An attribute may be defined or undefined. If it is defined, its value may be empty or non-empty. +More details you can find here: https://en.wikipedia.org/wiki/Extended_file_attributes . + +All functions are provided in triples: Get/LGet/FGet, Set/LSet/FSet etc. 
The "L" +variant will not follow a symlink at the end of the path, and "F" variant accepts +a file descriptor instead of a path. + +Example for "L" variant, assuming path is "/symlink1/symlink2", where both components are +symlinks: +Get will follow "symlink1" and "symlink2" and operate on the target of +"symlink2". LGet will follow "symlink1" but operate directly on "symlink2". +*/ +package xattr + +import ( + "os" + "syscall" +) + +// Error records an error and the operation, file path and attribute that caused it. +type Error struct { + Op string + Path string + Name string + Err error +} + +func (e *Error) Error() (errstr string) { + if e.Op != "" { + errstr += e.Op + } + if e.Path != "" { + if errstr != "" { + errstr += " " + } + errstr += e.Path + } + if e.Name != "" { + if errstr != "" { + errstr += " " + } + errstr += e.Name + } + if e.Err != nil { + if errstr != "" { + errstr += ": " + } + errstr += e.Err.Error() + } + return +} + +// Get retrieves extended attribute data associated with path. It will follow +// all symlinks along the path. +func Get(path, name string) ([]byte, error) { + return get(path, name, func(name string, data []byte) (int, error) { + return getxattr(path, name, data) + }) +} + +// LGet is like Get but does not follow a symlink at the end of the path. +func LGet(path, name string) ([]byte, error) { + return get(path, name, func(name string, data []byte) (int, error) { + return lgetxattr(path, name, data) + }) +} + +// FGet is like Get but accepts a os.File instead of a file path. +func FGet(f *os.File, name string) ([]byte, error) { + return get(f.Name(), name, func(name string, data []byte) (int, error) { + return fgetxattr(f, name, data) + }) +} + +type getxattrFunc func(name string, data []byte) (int, error) + +// get contains the buffer allocation logic used by both Get and LGet. +func get(path string, name string, getxattrFunc getxattrFunc) ([]byte, error) { + const ( + // Start with a 1 KB buffer for the xattr value + initialBufSize = 1024 + + // The theoretical maximum xattr value size on MacOS is 64 MB. On Linux it's + // much smaller at 64 KB. Unless the kernel is evil or buggy, we should never + // hit the limit. + maxBufSize = 64 * 1024 * 1024 + + // Function name as reported in error messages + myname = "xattr.get" + ) + + size := initialBufSize + for { + data := make([]byte, size) + read, err := getxattrFunc(name, data) + + // If the buffer was too small to fit the value, Linux and MacOS react + // differently: + // Linux: returns an ERANGE error and "-1" bytes. + // MacOS: truncates the value and returns "size" bytes. If the value + // happens to be exactly as big as the buffer, we cannot know if it was + // truncated, and we retry with a bigger buffer. Contrary to documentation, + // MacOS never seems to return ERANGE! + // To keep the code simple, we always check both conditions, and sometimes + // double the buffer size without it being strictly necessary. + if err == syscall.ERANGE || read == size { + // The buffer was too small. Try again. + size <<= 1 + if size >= maxBufSize { + return nil, &Error{myname, path, name, syscall.EOVERFLOW} + } + continue + } + if err != nil { + return nil, &Error{myname, path, name, err} + } + return data[:read], nil + } +} + +// Set associates name and data together as an attribute of path. 
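+//
+// A minimal usage sketch (path, name and value are placeholders; the "user."
+// namespace follows the package README):
+//
+//	if err := xattr.Set("/tmp/myfile", "user.test", []byte("test-attr-value")); err != nil {
+//		log.Fatal(err)
+//	}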
+func Set(path, name string, data []byte) error { + if err := setxattr(path, name, data, 0); err != nil { + return &Error{"xattr.Set", path, name, err} + } + return nil +} + +// LSet is like Set but does not follow a symlink at +// the end of the path. +func LSet(path, name string, data []byte) error { + if err := lsetxattr(path, name, data, 0); err != nil { + return &Error{"xattr.LSet", path, name, err} + } + return nil +} + +// FSet is like Set but accepts a os.File instead of a file path. +func FSet(f *os.File, name string, data []byte) error { + if err := fsetxattr(f, name, data, 0); err != nil { + return &Error{"xattr.FSet", f.Name(), name, err} + } + return nil +} + +// SetWithFlags associates name and data together as an attribute of path. +// Forwards the flags parameter to the syscall layer. +func SetWithFlags(path, name string, data []byte, flags int) error { + if err := setxattr(path, name, data, flags); err != nil { + return &Error{"xattr.SetWithFlags", path, name, err} + } + return nil +} + +// LSetWithFlags is like SetWithFlags but does not follow a symlink at +// the end of the path. +func LSetWithFlags(path, name string, data []byte, flags int) error { + if err := lsetxattr(path, name, data, flags); err != nil { + return &Error{"xattr.LSetWithFlags", path, name, err} + } + return nil +} + +// FSetWithFlags is like SetWithFlags but accepts a os.File instead of a file path. +func FSetWithFlags(f *os.File, name string, data []byte, flags int) error { + if err := fsetxattr(f, name, data, flags); err != nil { + return &Error{"xattr.FSetWithFlags", f.Name(), name, err} + } + return nil +} + +// Remove removes the attribute associated with the given path. +func Remove(path, name string) error { + if err := removexattr(path, name); err != nil { + return &Error{"xattr.Remove", path, name, err} + } + return nil +} + +// LRemove is like Remove but does not follow a symlink at the end of the +// path. +func LRemove(path, name string) error { + if err := lremovexattr(path, name); err != nil { + return &Error{"xattr.LRemove", path, name, err} + } + return nil +} + +// FRemove is like Remove but accepts a os.File instead of a file path. +func FRemove(f *os.File, name string) error { + if err := fremovexattr(f, name); err != nil { + return &Error{"xattr.FRemove", f.Name(), name, err} + } + return nil +} + +// List retrieves a list of names of extended attributes associated +// with the given path in the file system. +func List(path string) ([]string, error) { + return list(path, func(data []byte) (int, error) { + return listxattr(path, data) + }) +} + +// LList is like List but does not follow a symlink at the end of the +// path. +func LList(path string) ([]string, error) { + return list(path, func(data []byte) (int, error) { + return llistxattr(path, data) + }) +} + +// FList is like List but accepts a os.File instead of a file path. +func FList(f *os.File) ([]string, error) { + return list(f.Name(), func(data []byte) (int, error) { + return flistxattr(f, data) + }) +} + +type listxattrFunc func(data []byte) (int, error) + +// list contains the buffer allocation logic used by both List and LList. +func list(path string, listxattrFunc listxattrFunc) ([]string, error) { + myname := "xattr.list" + // find size. + size, err := listxattrFunc(nil) + if err != nil { + return nil, &Error{myname, path, "", err} + } + if size > 0 { + // `size + 1` because of ERANGE error when reading + // from a SMB1 mount point (https://github.com/pkg/xattr/issues/16). 
+ buf := make([]byte, size+1) + // Read into buffer of that size. + read, err := listxattrFunc(buf) + if err != nil { + return nil, &Error{myname, path, "", err} + } + return stringsFromByteSlice(buf[:read]), nil + } + return []string{}, nil +} + +// bytePtrFromSlice returns a pointer to array of bytes and a size. +func bytePtrFromSlice(data []byte) (ptr *byte, size int) { + size = len(data) + if size > 0 { + ptr = &data[0] + } + return +} diff --git a/vendor/github.com/pkg/xattr/xattr_bsd.go b/vendor/github.com/pkg/xattr/xattr_bsd.go new file mode 100644 index 00000000000..f4a3f953904 --- /dev/null +++ b/vendor/github.com/pkg/xattr/xattr_bsd.go @@ -0,0 +1,201 @@ +//go:build freebsd || netbsd +// +build freebsd netbsd + +package xattr + +import ( + "os" + "syscall" + "unsafe" +) + +const ( + // XATTR_SUPPORTED will be true if the current platform is supported + XATTR_SUPPORTED = true + + EXTATTR_NAMESPACE_USER = 1 + + // ENOATTR is not exported by the syscall package on Linux, because it is + // an alias for ENODATA. We export it here so it is available on all + // our supported platforms. + ENOATTR = syscall.ENOATTR +) + +func getxattr(path string, name string, data []byte) (int, error) { + return sysGet(syscall.SYS_EXTATTR_GET_FILE, path, name, data) +} + +func lgetxattr(path string, name string, data []byte) (int, error) { + return sysGet(syscall.SYS_EXTATTR_GET_LINK, path, name, data) +} + +func fgetxattr(f *os.File, name string, data []byte) (int, error) { + return getxattr(f.Name(), name, data) +} + +// sysGet is called by getxattr and lgetxattr with the appropriate syscall +// number. This works because syscalls have the same signature and return +// values. +func sysGet(syscallNum uintptr, path string, name string, data []byte) (int, error) { + ptr, nbytes := bytePtrFromSlice(data) + /* + ssize_t extattr_get_file( + const char *path, + int attrnamespace, + const char *attrname, + void *data, + size_t nbytes); + + ssize_t extattr_get_link( + const char *path, + int attrnamespace, + const char *attrname, + void *data, + size_t nbytes); + */ + r0, _, err := syscall.Syscall6(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))), + EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(syscall.StringBytePtr(name))), + uintptr(unsafe.Pointer(ptr)), uintptr(nbytes), 0) + if err != syscall.Errno(0) { + return int(r0), err + } + return int(r0), nil +} + +func setxattr(path string, name string, data []byte, flags int) error { + return sysSet(syscall.SYS_EXTATTR_SET_FILE, path, name, data) +} + +func lsetxattr(path string, name string, data []byte, flags int) error { + return sysSet(syscall.SYS_EXTATTR_SET_LINK, path, name, data) +} + +func fsetxattr(f *os.File, name string, data []byte, flags int) error { + return setxattr(f.Name(), name, data, flags) +} + +// sysSet is called by setxattr and lsetxattr with the appropriate syscall +// number. This works because syscalls have the same signature and return +// values. 
+func sysSet(syscallNum uintptr, path string, name string, data []byte) error { + ptr, nbytes := bytePtrFromSlice(data) + /* + ssize_t extattr_set_file( + const char *path, + int attrnamespace, + const char *attrname, + const void *data, + size_t nbytes + ); + + ssize_t extattr_set_link( + const char *path, + int attrnamespace, + const char *attrname, + const void *data, + size_t nbytes + ); + */ + r0, _, err := syscall.Syscall6(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))), + EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(syscall.StringBytePtr(name))), + uintptr(unsafe.Pointer(ptr)), uintptr(nbytes), 0) + if err != syscall.Errno(0) { + return err + } + if int(r0) != nbytes { + return syscall.E2BIG + } + return nil +} + +func removexattr(path string, name string) error { + return sysRemove(syscall.SYS_EXTATTR_DELETE_FILE, path, name) +} + +func lremovexattr(path string, name string) error { + return sysRemove(syscall.SYS_EXTATTR_DELETE_LINK, path, name) +} + +func fremovexattr(f *os.File, name string) error { + return removexattr(f.Name(), name) +} + +// sysSet is called by removexattr and lremovexattr with the appropriate syscall +// number. This works because syscalls have the same signature and return +// values. +func sysRemove(syscallNum uintptr, path string, name string) error { + /* + int extattr_delete_file( + const char *path, + int attrnamespace, + const char *attrname + ); + + int extattr_delete_link( + const char *path, + int attrnamespace, + const char *attrname + ); + */ + _, _, err := syscall.Syscall(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))), + EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(syscall.StringBytePtr(name))), + ) + if err != syscall.Errno(0) { + return err + } + return nil +} + +func listxattr(path string, data []byte) (int, error) { + return sysList(syscall.SYS_EXTATTR_LIST_FILE, path, data) +} + +func llistxattr(path string, data []byte) (int, error) { + return sysList(syscall.SYS_EXTATTR_LIST_LINK, path, data) +} + +func flistxattr(f *os.File, data []byte) (int, error) { + return listxattr(f.Name(), data) +} + +// sysSet is called by listxattr and llistxattr with the appropriate syscall +// number. This works because syscalls have the same signature and return +// values. +func sysList(syscallNum uintptr, path string, data []byte) (int, error) { + ptr, nbytes := bytePtrFromSlice(data) + /* + ssize_t extattr_list_file( + const char *path, + int attrnamespace, + void *data, + size_t nbytes + ); + + ssize_t extattr_list_link( + const char *path, + int attrnamespace, + void *data, + size_t nbytes + ); + */ + r0, _, err := syscall.Syscall6(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))), + EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(ptr)), uintptr(nbytes), 0, 0) + if err != syscall.Errno(0) { + return int(r0), err + } + return int(r0), nil +} + +// stringsFromByteSlice converts a sequence of attributes to a []string. +// On FreeBSD, each entry consists of a single byte containing the length +// of the attribute name, followed by the attribute name. +// The name is _not_ terminated by NULL. 
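+//
+// For example, the buffer {3, 'f', 'o', 'o', 4, 'n', 'a', 'm', 'e'} decodes to
+// []string{"foo", "name"} (illustrative values).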
+func stringsFromByteSlice(buf []byte) (result []string) { + index := 0 + for index < len(buf) { + next := index + 1 + int(buf[index]) + result = append(result, string(buf[index+1:next])) + index = next + } + return +} diff --git a/vendor/github.com/pkg/xattr/xattr_darwin.go b/vendor/github.com/pkg/xattr/xattr_darwin.go new file mode 100644 index 00000000000..ee7a501dae5 --- /dev/null +++ b/vendor/github.com/pkg/xattr/xattr_darwin.go @@ -0,0 +1,90 @@ +//go:build darwin +// +build darwin + +package xattr + +import ( + "os" + "syscall" + + "golang.org/x/sys/unix" +) + +// See https://opensource.apple.com/source/xnu/xnu-1504.15.3/bsd/sys/xattr.h.auto.html +const ( + // XATTR_SUPPORTED will be true if the current platform is supported + XATTR_SUPPORTED = true + + XATTR_NOFOLLOW = 0x0001 + XATTR_CREATE = 0x0002 + XATTR_REPLACE = 0x0004 + XATTR_NOSECURITY = 0x0008 + XATTR_NODEFAULT = 0x0010 + XATTR_SHOWCOMPRESSION = 0x0020 + + // ENOATTR is not exported by the syscall package on Linux, because it is + // an alias for ENODATA. We export it here so it is available on all + // our supported platforms. + ENOATTR = syscall.ENOATTR +) + +func getxattr(path string, name string, data []byte) (int, error) { + return unix.Getxattr(path, name, data) +} + +func lgetxattr(path string, name string, data []byte) (int, error) { + return unix.Lgetxattr(path, name, data) +} + +func fgetxattr(f *os.File, name string, data []byte) (int, error) { + return getxattr(f.Name(), name, data) +} + +func setxattr(path string, name string, data []byte, flags int) error { + return unix.Setxattr(path, name, data, flags) +} + +func lsetxattr(path string, name string, data []byte, flags int) error { + return unix.Lsetxattr(path, name, data, flags) +} + +func fsetxattr(f *os.File, name string, data []byte, flags int) error { + return setxattr(f.Name(), name, data, flags) +} + +func removexattr(path string, name string) error { + return unix.Removexattr(path, name) +} + +func lremovexattr(path string, name string) error { + return unix.Lremovexattr(path, name) +} + +func fremovexattr(f *os.File, name string) error { + return removexattr(f.Name(), name) +} + +func listxattr(path string, data []byte) (int, error) { + return unix.Listxattr(path, data) +} + +func llistxattr(path string, data []byte) (int, error) { + return unix.Llistxattr(path, data) +} + +func flistxattr(f *os.File, data []byte) (int, error) { + return listxattr(f.Name(), data) +} + +// stringsFromByteSlice converts a sequence of attributes to a []string. +// On Darwin and Linux, each entry is a NULL-terminated string. +func stringsFromByteSlice(buf []byte) (result []string) { + offset := 0 + for index, b := range buf { + if b == 0 { + result = append(result, string(buf[offset:index])) + offset = index + 1 + } + } + return +} diff --git a/vendor/github.com/pkg/xattr/xattr_linux.go b/vendor/github.com/pkg/xattr/xattr_linux.go new file mode 100644 index 00000000000..879085ee5d4 --- /dev/null +++ b/vendor/github.com/pkg/xattr/xattr_linux.go @@ -0,0 +1,142 @@ +//go:build linux +// +build linux + +package xattr + +import ( + "os" + "syscall" + + "golang.org/x/sys/unix" +) + +const ( + // XATTR_SUPPORTED will be true if the current platform is supported + XATTR_SUPPORTED = true + + XATTR_CREATE = unix.XATTR_CREATE + XATTR_REPLACE = unix.XATTR_REPLACE + + // ENOATTR is not exported by the syscall package on Linux, because it is + // an alias for ENODATA. We export it here so it is available on all + // our supported platforms. 
+ ENOATTR = syscall.ENODATA +) + +// On Linux, FUSE and CIFS filesystems can return EINTR for interrupted system +// calls. This function works around this by retrying system calls until they +// stop returning EINTR. +// +// See https://github.com/golang/go/commit/6b420169d798c7ebe733487b56ea5c3fa4aab5ce. +func ignoringEINTR(fn func() error) (err error) { + for { + err = fn() + if err != unix.EINTR { + break + } + } + return err +} + +func getxattr(path string, name string, data []byte) (int, error) { + var r int + err := ignoringEINTR(func() (err error) { + r, err = unix.Getxattr(path, name, data) + return err + }) + return r, err +} + +func lgetxattr(path string, name string, data []byte) (int, error) { + var r int + err := ignoringEINTR(func() (err error) { + r, err = unix.Lgetxattr(path, name, data) + return err + }) + return r, err +} + +func fgetxattr(f *os.File, name string, data []byte) (int, error) { + var r int + err := ignoringEINTR(func() (err error) { + r, err = unix.Fgetxattr(int(f.Fd()), name, data) + return err + }) + return r, err +} + +func setxattr(path string, name string, data []byte, flags int) error { + return ignoringEINTR(func() (err error) { + return unix.Setxattr(path, name, data, flags) + }) +} + +func lsetxattr(path string, name string, data []byte, flags int) error { + return ignoringEINTR(func() (err error) { + return unix.Lsetxattr(path, name, data, flags) + }) +} + +func fsetxattr(f *os.File, name string, data []byte, flags int) error { + return ignoringEINTR(func() (err error) { + return unix.Fsetxattr(int(f.Fd()), name, data, flags) + }) +} + +func removexattr(path string, name string) error { + return ignoringEINTR(func() (err error) { + return unix.Removexattr(path, name) + }) +} + +func lremovexattr(path string, name string) error { + return ignoringEINTR(func() (err error) { + return unix.Lremovexattr(path, name) + }) +} + +func fremovexattr(f *os.File, name string) error { + return ignoringEINTR(func() (err error) { + return unix.Fremovexattr(int(f.Fd()), name) + }) +} + +func listxattr(path string, data []byte) (int, error) { + var r int + err := ignoringEINTR(func() (err error) { + r, err = unix.Listxattr(path, data) + return err + }) + return r, err +} + +func llistxattr(path string, data []byte) (int, error) { + var r int + err := ignoringEINTR(func() (err error) { + r, err = unix.Llistxattr(path, data) + return err + }) + return r, err +} + +func flistxattr(f *os.File, data []byte) (int, error) { + var r int + err := ignoringEINTR(func() (err error) { + r, err = unix.Flistxattr(int(f.Fd()), data) + return err + }) + return r, err +} + +// stringsFromByteSlice converts a sequence of attributes to a []string. +// On Darwin and Linux, each entry is a NULL-terminated string. 
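+//
+// For example, the buffer "user.foo\x00user.bar\x00" decodes to
+// []string{"user.foo", "user.bar"} (illustrative values).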
+func stringsFromByteSlice(buf []byte) (result []string) { + offset := 0 + for index, b := range buf { + if b == 0 { + result = append(result, string(buf[offset:index])) + offset = index + 1 + } + } + return +} diff --git a/vendor/github.com/pkg/xattr/xattr_solaris.go b/vendor/github.com/pkg/xattr/xattr_solaris.go new file mode 100644 index 00000000000..38d88d609c6 --- /dev/null +++ b/vendor/github.com/pkg/xattr/xattr_solaris.go @@ -0,0 +1,165 @@ +//go:build solaris +// +build solaris + +package xattr + +import ( + "os" + "syscall" + + "golang.org/x/sys/unix" +) + +const ( + // XATTR_SUPPORTED will be true if the current platform is supported + XATTR_SUPPORTED = true + + XATTR_CREATE = 0x1 + XATTR_REPLACE = 0x2 + + // ENOATTR is not exported by the syscall package on Linux, because it is + // an alias for ENODATA. We export it here so it is available on all + // our supported platforms. + ENOATTR = syscall.ENODATA +) + +func getxattr(path string, name string, data []byte) (int, error) { + f, err := os.OpenFile(path, os.O_RDONLY, 0) + if err != nil { + return 0, err + } + defer func() { + _ = f.Close() + }() + return fgetxattr(f, name, data) +} + +func lgetxattr(path string, name string, data []byte) (int, error) { + return 0, unix.ENOTSUP +} + +func fgetxattr(f *os.File, name string, data []byte) (int, error) { + fd, err := unix.Openat(int(f.Fd()), name, unix.O_RDONLY|unix.O_XATTR, 0) + if err != nil { + return 0, err + } + defer func() { + _ = unix.Close(fd) + }() + return unix.Read(fd, data) +} + +func setxattr(path string, name string, data []byte, flags int) error { + f, err := os.OpenFile(path, os.O_RDONLY, 0) + if err != nil { + return err + } + err = fsetxattr(f, name, data, flags) + if err != nil { + _ = f.Close() + return err + } + return f.Close() +} + +func lsetxattr(path string, name string, data []byte, flags int) error { + return unix.ENOTSUP +} + +func fsetxattr(f *os.File, name string, data []byte, flags int) error { + mode := unix.O_WRONLY | unix.O_XATTR + if flags&XATTR_REPLACE != 0 { + mode |= unix.O_TRUNC + } else if flags&XATTR_CREATE != 0 { + mode |= unix.O_CREAT | unix.O_EXCL + } else { + mode |= unix.O_CREAT | unix.O_TRUNC + } + fd, err := unix.Openat(int(f.Fd()), name, mode, 0666) + if err != nil { + return err + } + if _, err = unix.Write(fd, data); err != nil { + _ = unix.Close(fd) + return err + } + return unix.Close(fd) +} + +func removexattr(path string, name string) error { + fd, err := unix.Open(path, unix.O_RDONLY|unix.O_XATTR, 0) + if err != nil { + return err + } + f := os.NewFile(uintptr(fd), path) + defer func() { + _ = f.Close() + }() + return fremovexattr(f, name) +} + +func lremovexattr(path string, name string) error { + return unix.ENOTSUP +} + +func fremovexattr(f *os.File, name string) error { + fd, err := unix.Openat(int(f.Fd()), ".", unix.O_XATTR, 0) + if err != nil { + return err + } + defer func() { + _ = unix.Close(fd) + }() + return unix.Unlinkat(fd, name, 0) +} + +func listxattr(path string, data []byte) (int, error) { + f, err := os.OpenFile(path, os.O_RDONLY, 0) + if err != nil { + return 0, err + } + defer func() { + _ = f.Close() + }() + return flistxattr(f, data) +} + +func llistxattr(path string, data []byte) (int, error) { + return 0, unix.ENOTSUP +} + +func flistxattr(f *os.File, data []byte) (int, error) { + fd, err := unix.Openat(int(f.Fd()), ".", unix.O_RDONLY|unix.O_XATTR, 0) + if err != nil { + return 0, err + } + xf := os.NewFile(uintptr(fd), f.Name()) + defer func() { + _ = xf.Close() + }() + names, err := 
xf.Readdirnames(-1) + if err != nil { + return 0, err + } + var buf []byte + for _, name := range names { + buf = append(buf, append([]byte(name), '\000')...) + } + if data == nil { + return len(buf), nil + } + return copy(data, buf), nil +} + +// stringsFromByteSlice converts a sequence of attributes to a []string. +// On Darwin and Linux, each entry is a NULL-terminated string. +func stringsFromByteSlice(buf []byte) (result []string) { + offset := 0 + for index, b := range buf { + if b == 0 { + result = append(result, string(buf[offset:index])) + offset = index + 1 + } + } + return +} diff --git a/vendor/github.com/pkg/xattr/xattr_unsupported.go b/vendor/github.com/pkg/xattr/xattr_unsupported.go new file mode 100644 index 00000000000..4153decb12c --- /dev/null +++ b/vendor/github.com/pkg/xattr/xattr_unsupported.go @@ -0,0 +1,70 @@ +//go:build !linux && !freebsd && !netbsd && !darwin && !solaris +// +build !linux,!freebsd,!netbsd,!darwin,!solaris + +package xattr + +import ( + "os" + "syscall" +) + +const ( + // We need to use the default for non supported operating systems + ENOATTR = syscall.ENODATA +) + +// XATTR_SUPPORTED will be true if the current platform is supported +const XATTR_SUPPORTED = false + +func getxattr(path string, name string, data []byte) (int, error) { + return 0, nil +} + +func lgetxattr(path string, name string, data []byte) (int, error) { + return 0, nil +} + +func fgetxattr(f *os.File, name string, data []byte) (int, error) { + return 0, nil +} + +func setxattr(path string, name string, data []byte, flags int) error { + return nil +} + +func lsetxattr(path string, name string, data []byte, flags int) error { + return nil +} + +func fsetxattr(f *os.File, name string, data []byte, flags int) error { + return nil +} + +func removexattr(path string, name string) error { + return nil +} + +func lremovexattr(path string, name string) error { + return nil +} + +func fremovexattr(f *os.File, name string) error { + return nil +} + +func listxattr(path string, data []byte) (int, error) { + return 0, nil +} + +func llistxattr(path string, data []byte) (int, error) { + return 0, nil +} + +func flistxattr(f *os.File, data []byte) (int, error) { + return 0, nil +} + +// dummy +func stringsFromByteSlice(buf []byte) (result []string) { + return []string{} +} diff --git a/vendor/golang.org/x/crypto/argon2/argon2.go b/vendor/golang.org/x/crypto/argon2/argon2.go new file mode 100644 index 00000000000..b423feaea9f --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/argon2.go @@ -0,0 +1,285 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package argon2 implements the key derivation function Argon2. +// Argon2 was selected as the winner of the Password Hashing Competition and can +// be used to derive cryptographic keys from passwords. +// +// For a detailed specification of Argon2 see [1]. +// +// If you aren't sure which function you need, use Argon2id (IDKey) and +// the parameter recommendations for your scenario. +// +// +// Argon2i +// +// Argon2i (implemented by Key) is the side-channel resistant version of Argon2. +// It uses data-independent memory access, which is preferred for password +// hashing and password-based key derivation. Argon2i requires more passes over +// memory than Argon2id to protect from trade-off attacks. 
The recommended +// parameters (taken from [2]) for non-interactive operations are time=3 and to +// use the maximum available memory. +// +// +// Argon2id +// +// Argon2id (implemented by IDKey) is a hybrid version of Argon2 combining +// Argon2i and Argon2d. It uses data-independent memory access for the first +// half of the first iteration over the memory and data-dependent memory access +// for the rest. Argon2id is side-channel resistant and provides better brute- +// force cost savings due to time-memory tradeoffs than Argon2i. The recommended +// parameters for non-interactive operations (taken from [2]) are time=1 and to +// use the maximum available memory. +// +// [1] https://github.com/P-H-C/phc-winner-argon2/blob/master/argon2-specs.pdf +// [2] https://tools.ietf.org/html/draft-irtf-cfrg-argon2-03#section-9.3 +package argon2 + +import ( + "encoding/binary" + "sync" + + "golang.org/x/crypto/blake2b" +) + +// The Argon2 version implemented by this package. +const Version = 0x13 + +const ( + argon2d = iota + argon2i + argon2id +) + +// Key derives a key from the password, salt, and cost parameters using Argon2i +// returning a byte slice of length keyLen that can be used as cryptographic +// key. The CPU cost and parallelism degree must be greater than zero. +// +// For example, you can get a derived key for e.g. AES-256 (which needs a +// 32-byte key) by doing: +// +// key := argon2.Key([]byte("some password"), salt, 3, 32*1024, 4, 32) +// +// The draft RFC recommends[2] time=3, and memory=32*1024 is a sensible number. +// If using that amount of memory (32 MB) is not possible in some contexts then +// the time parameter can be increased to compensate. +// +// The time parameter specifies the number of passes over the memory and the +// memory parameter specifies the size of the memory in KiB. For example +// memory=32*1024 sets the memory cost to ~32 MB. The number of threads can be +// adjusted to the number of available CPUs. The cost parameters should be +// increased as memory latency and CPU parallelism increases. Remember to get a +// good random salt. +func Key(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { + return deriveKey(argon2i, password, salt, nil, nil, time, memory, threads, keyLen) +} + +// IDKey derives a key from the password, salt, and cost parameters using +// Argon2id returning a byte slice of length keyLen that can be used as +// cryptographic key. The CPU cost and parallelism degree must be greater than +// zero. +// +// For example, you can get a derived key for e.g. AES-256 (which needs a +// 32-byte key) by doing: +// +// key := argon2.IDKey([]byte("some password"), salt, 1, 64*1024, 4, 32) +// +// The draft RFC recommends[2] time=1, and memory=64*1024 is a sensible number. +// If using that amount of memory (64 MB) is not possible in some contexts then +// the time parameter can be increased to compensate. +// +// The time parameter specifies the number of passes over the memory and the +// memory parameter specifies the size of the memory in KiB. For example +// memory=64*1024 sets the memory cost to ~64 MB. The number of threads can be +// adjusted to the numbers of available CPUs. The cost parameters should be +// increased as memory latency and CPU parallelism increases. Remember to get a +// good random salt. 
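+//
+// A minimal sketch (the password is a placeholder; the salt should come from
+// crypto/rand):
+//
+//	salt := make([]byte, 16)
+//	if _, err := rand.Read(salt); err != nil {
+//		panic(err)
+//	}
+//	key := argon2.IDKey([]byte("some password"), salt, 1, 64*1024, 4, 32)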
+func IDKey(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { + return deriveKey(argon2id, password, salt, nil, nil, time, memory, threads, keyLen) +} + +func deriveKey(mode int, password, salt, secret, data []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { + if time < 1 { + panic("argon2: number of rounds too small") + } + if threads < 1 { + panic("argon2: parallelism degree too low") + } + h0 := initHash(password, salt, secret, data, time, memory, uint32(threads), keyLen, mode) + + memory = memory / (syncPoints * uint32(threads)) * (syncPoints * uint32(threads)) + if memory < 2*syncPoints*uint32(threads) { + memory = 2 * syncPoints * uint32(threads) + } + B := initBlocks(&h0, memory, uint32(threads)) + processBlocks(B, time, memory, uint32(threads), mode) + return extractKey(B, memory, uint32(threads), keyLen) +} + +const ( + blockLength = 128 + syncPoints = 4 +) + +type block [blockLength]uint64 + +func initHash(password, salt, key, data []byte, time, memory, threads, keyLen uint32, mode int) [blake2b.Size + 8]byte { + var ( + h0 [blake2b.Size + 8]byte + params [24]byte + tmp [4]byte + ) + + b2, _ := blake2b.New512(nil) + binary.LittleEndian.PutUint32(params[0:4], threads) + binary.LittleEndian.PutUint32(params[4:8], keyLen) + binary.LittleEndian.PutUint32(params[8:12], memory) + binary.LittleEndian.PutUint32(params[12:16], time) + binary.LittleEndian.PutUint32(params[16:20], uint32(Version)) + binary.LittleEndian.PutUint32(params[20:24], uint32(mode)) + b2.Write(params[:]) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(password))) + b2.Write(tmp[:]) + b2.Write(password) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(salt))) + b2.Write(tmp[:]) + b2.Write(salt) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(key))) + b2.Write(tmp[:]) + b2.Write(key) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(data))) + b2.Write(tmp[:]) + b2.Write(data) + b2.Sum(h0[:0]) + return h0 +} + +func initBlocks(h0 *[blake2b.Size + 8]byte, memory, threads uint32) []block { + var block0 [1024]byte + B := make([]block, memory) + for lane := uint32(0); lane < threads; lane++ { + j := lane * (memory / threads) + binary.LittleEndian.PutUint32(h0[blake2b.Size+4:], lane) + + binary.LittleEndian.PutUint32(h0[blake2b.Size:], 0) + blake2bHash(block0[:], h0[:]) + for i := range B[j+0] { + B[j+0][i] = binary.LittleEndian.Uint64(block0[i*8:]) + } + + binary.LittleEndian.PutUint32(h0[blake2b.Size:], 1) + blake2bHash(block0[:], h0[:]) + for i := range B[j+1] { + B[j+1][i] = binary.LittleEndian.Uint64(block0[i*8:]) + } + } + return B +} + +func processBlocks(B []block, time, memory, threads uint32, mode int) { + lanes := memory / threads + segments := lanes / syncPoints + + processSegment := func(n, slice, lane uint32, wg *sync.WaitGroup) { + var addresses, in, zero block + if mode == argon2i || (mode == argon2id && n == 0 && slice < syncPoints/2) { + in[0] = uint64(n) + in[1] = uint64(lane) + in[2] = uint64(slice) + in[3] = uint64(memory) + in[4] = uint64(time) + in[5] = uint64(mode) + } + + index := uint32(0) + if n == 0 && slice == 0 { + index = 2 // we have already generated the first two blocks + if mode == argon2i || mode == argon2id { + in[6]++ + processBlock(&addresses, &in, &zero) + processBlock(&addresses, &addresses, &zero) + } + } + + offset := lane*lanes + slice*segments + index + var random uint64 + for index < segments { + prev := offset - 1 + if index == 0 && slice == 0 { + prev += lanes // last block in lane + } + if mode == argon2i || (mode == 
argon2id && n == 0 && slice < syncPoints/2) { + if index%blockLength == 0 { + in[6]++ + processBlock(&addresses, &in, &zero) + processBlock(&addresses, &addresses, &zero) + } + random = addresses[index%blockLength] + } else { + random = B[prev][0] + } + newOffset := indexAlpha(random, lanes, segments, threads, n, slice, lane, index) + processBlockXOR(&B[offset], &B[prev], &B[newOffset]) + index, offset = index+1, offset+1 + } + wg.Done() + } + + for n := uint32(0); n < time; n++ { + for slice := uint32(0); slice < syncPoints; slice++ { + var wg sync.WaitGroup + for lane := uint32(0); lane < threads; lane++ { + wg.Add(1) + go processSegment(n, slice, lane, &wg) + } + wg.Wait() + } + } + +} + +func extractKey(B []block, memory, threads, keyLen uint32) []byte { + lanes := memory / threads + for lane := uint32(0); lane < threads-1; lane++ { + for i, v := range B[(lane*lanes)+lanes-1] { + B[memory-1][i] ^= v + } + } + + var block [1024]byte + for i, v := range B[memory-1] { + binary.LittleEndian.PutUint64(block[i*8:], v) + } + key := make([]byte, keyLen) + blake2bHash(key, block[:]) + return key +} + +func indexAlpha(rand uint64, lanes, segments, threads, n, slice, lane, index uint32) uint32 { + refLane := uint32(rand>>32) % threads + if n == 0 && slice == 0 { + refLane = lane + } + m, s := 3*segments, ((slice+1)%syncPoints)*segments + if lane == refLane { + m += index + } + if n == 0 { + m, s = slice*segments, 0 + if slice == 0 || lane == refLane { + m += index + } + } + if index == 0 || lane == refLane { + m-- + } + return phi(rand, uint64(m), uint64(s), refLane, lanes) +} + +func phi(rand, m, s uint64, lane, lanes uint32) uint32 { + p := rand & 0xFFFFFFFF + p = (p * p) >> 32 + p = (p * m) >> 32 + return lane*lanes + uint32((s+m-(p+1))%uint64(lanes)) +} diff --git a/vendor/golang.org/x/crypto/argon2/blake2b.go b/vendor/golang.org/x/crypto/argon2/blake2b.go new file mode 100644 index 00000000000..10f46948dc1 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blake2b.go @@ -0,0 +1,53 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package argon2 + +import ( + "encoding/binary" + "hash" + + "golang.org/x/crypto/blake2b" +) + +// blake2bHash computes an arbitrary long hash value of in +// and writes the hash to out. +func blake2bHash(out []byte, in []byte) { + var b2 hash.Hash + if n := len(out); n < blake2b.Size { + b2, _ = blake2b.New(n, nil) + } else { + b2, _ = blake2b.New512(nil) + } + + var buffer [blake2b.Size]byte + binary.LittleEndian.PutUint32(buffer[:4], uint32(len(out))) + b2.Write(buffer[:4]) + b2.Write(in) + + if len(out) <= blake2b.Size { + b2.Sum(out[:0]) + return + } + + outLen := len(out) + b2.Sum(buffer[:0]) + b2.Reset() + copy(out, buffer[:32]) + out = out[32:] + for len(out) > blake2b.Size { + b2.Write(buffer[:]) + b2.Sum(buffer[:0]) + copy(out, buffer[:32]) + out = out[32:] + b2.Reset() + } + + if outLen%blake2b.Size > 0 { // outLen > 64 + r := ((outLen + 31) / 32) - 2 // ⌈τ /32⌉-2 + b2, _ = blake2b.New(outLen-32*r, nil) + } + b2.Write(buffer[:]) + b2.Sum(out[:0]) +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.go b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go new file mode 100644 index 00000000000..a014ac92aa9 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go @@ -0,0 +1,61 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 && gc && !purego +// +build amd64,gc,!purego + +package argon2 + +import "golang.org/x/sys/cpu" + +func init() { + useSSE4 = cpu.X86.HasSSE41 +} + +//go:noescape +func mixBlocksSSE2(out, a, b, c *block) + +//go:noescape +func xorBlocksSSE2(out, a, b, c *block) + +//go:noescape +func blamkaSSE4(b *block) + +func processBlockSSE(out, in1, in2 *block, xor bool) { + var t block + mixBlocksSSE2(&t, in1, in2, &t) + if useSSE4 { + blamkaSSE4(&t) + } else { + for i := 0; i < blockLength; i += 16 { + blamkaGeneric( + &t[i+0], &t[i+1], &t[i+2], &t[i+3], + &t[i+4], &t[i+5], &t[i+6], &t[i+7], + &t[i+8], &t[i+9], &t[i+10], &t[i+11], + &t[i+12], &t[i+13], &t[i+14], &t[i+15], + ) + } + for i := 0; i < blockLength/8; i += 2 { + blamkaGeneric( + &t[i], &t[i+1], &t[16+i], &t[16+i+1], + &t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1], + &t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1], + &t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1], + ) + } + } + if xor { + xorBlocksSSE2(out, in1, in2, &t) + } else { + mixBlocksSSE2(out, in1, in2, &t) + } +} + +func processBlock(out, in1, in2 *block) { + processBlockSSE(out, in1, in2, false) +} + +func processBlockXOR(out, in1, in2 *block) { + processBlockSSE(out, in1, in2, true) +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.s b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s new file mode 100644 index 00000000000..b2cc0515049 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s @@ -0,0 +1,244 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 && gc && !purego +// +build amd64,gc,!purego + +#include "textflag.h" + +DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 + +#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v6, t1; \ + PUNPCKLQDQ v6, t2; \ + PUNPCKHQDQ v7, v6; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ v7, t2; \ + MOVO t1, v7; \ + MOVO v2, t1; \ + PUNPCKHQDQ t2, v7; \ + PUNPCKLQDQ v3, t2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v3 + +#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v2, t1; \ + PUNPCKLQDQ v2, t2; \ + PUNPCKHQDQ v3, v2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ v3, t2; \ + MOVO t1, v3; \ + MOVO v6, t1; \ + PUNPCKHQDQ t2, v3; \ + PUNPCKLQDQ v7, t2; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v7 + +#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, t0, c40, c48) \ + MOVO v0, t0; \ + PMULULQ v2, t0; \ + PADDQ v2, v0; \ + PADDQ t0, v0; \ + PADDQ t0, v0; \ + PXOR v0, v6; \ + PSHUFD $0xB1, v6, v6; \ + MOVO v4, t0; \ + PMULULQ v6, t0; \ + PADDQ v6, v4; \ + PADDQ t0, v4; \ + PADDQ t0, v4; \ + PXOR v4, v2; \ + PSHUFB c40, v2; \ + MOVO v0, t0; \ + PMULULQ v2, t0; \ + PADDQ v2, v0; \ + PADDQ t0, v0; \ + PADDQ t0, v0; \ + PXOR v0, v6; \ + PSHUFB c48, v6; \ + MOVO v4, t0; \ + PMULULQ v6, t0; \ + PADDQ v6, v4; \ + PADDQ t0, v4; \ + PADDQ t0, v4; \ + PXOR v4, v2; \ + MOVO v2, t0; \ + PADDQ v2, t0; \ + PSRLQ $63, v2; \ + PXOR t0, v2; \ + MOVO v1, t0; \ + PMULULQ v3, t0; \ + PADDQ v3, v1; \ + PADDQ t0, v1; \ + PADDQ t0, v1; \ + 
PXOR v1, v7; \ + PSHUFD $0xB1, v7, v7; \ + MOVO v5, t0; \ + PMULULQ v7, t0; \ + PADDQ v7, v5; \ + PADDQ t0, v5; \ + PADDQ t0, v5; \ + PXOR v5, v3; \ + PSHUFB c40, v3; \ + MOVO v1, t0; \ + PMULULQ v3, t0; \ + PADDQ v3, v1; \ + PADDQ t0, v1; \ + PADDQ t0, v1; \ + PXOR v1, v7; \ + PSHUFB c48, v7; \ + MOVO v5, t0; \ + PMULULQ v7, t0; \ + PADDQ v7, v5; \ + PADDQ t0, v5; \ + PADDQ t0, v5; \ + PXOR v5, v3; \ + MOVO v3, t0; \ + PADDQ v3, t0; \ + PSRLQ $63, v3; \ + PXOR t0, v3 + +#define LOAD_MSG_0(block, off) \ + MOVOU 8*(off+0)(block), X0; \ + MOVOU 8*(off+2)(block), X1; \ + MOVOU 8*(off+4)(block), X2; \ + MOVOU 8*(off+6)(block), X3; \ + MOVOU 8*(off+8)(block), X4; \ + MOVOU 8*(off+10)(block), X5; \ + MOVOU 8*(off+12)(block), X6; \ + MOVOU 8*(off+14)(block), X7 + +#define STORE_MSG_0(block, off) \ + MOVOU X0, 8*(off+0)(block); \ + MOVOU X1, 8*(off+2)(block); \ + MOVOU X2, 8*(off+4)(block); \ + MOVOU X3, 8*(off+6)(block); \ + MOVOU X4, 8*(off+8)(block); \ + MOVOU X5, 8*(off+10)(block); \ + MOVOU X6, 8*(off+12)(block); \ + MOVOU X7, 8*(off+14)(block) + +#define LOAD_MSG_1(block, off) \ + MOVOU 8*off+0*8(block), X0; \ + MOVOU 8*off+16*8(block), X1; \ + MOVOU 8*off+32*8(block), X2; \ + MOVOU 8*off+48*8(block), X3; \ + MOVOU 8*off+64*8(block), X4; \ + MOVOU 8*off+80*8(block), X5; \ + MOVOU 8*off+96*8(block), X6; \ + MOVOU 8*off+112*8(block), X7 + +#define STORE_MSG_1(block, off) \ + MOVOU X0, 8*off+0*8(block); \ + MOVOU X1, 8*off+16*8(block); \ + MOVOU X2, 8*off+32*8(block); \ + MOVOU X3, 8*off+48*8(block); \ + MOVOU X4, 8*off+64*8(block); \ + MOVOU X5, 8*off+80*8(block); \ + MOVOU X6, 8*off+96*8(block); \ + MOVOU X7, 8*off+112*8(block) + +#define BLAMKA_ROUND_0(block, off, t0, t1, c40, c48) \ + LOAD_MSG_0(block, off); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \ + STORE_MSG_0(block, off) + +#define BLAMKA_ROUND_1(block, off, t0, t1, c40, c48) \ + LOAD_MSG_1(block, off); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \ + STORE_MSG_1(block, off) + +// func blamkaSSE4(b *block) +TEXT ·blamkaSSE4(SB), 4, $0-8 + MOVQ b+0(FP), AX + + MOVOU ·c40<>(SB), X10 + MOVOU ·c48<>(SB), X11 + + BLAMKA_ROUND_0(AX, 0, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 16, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 32, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 48, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 64, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 80, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 96, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 112, X8, X9, X10, X11) + + BLAMKA_ROUND_1(AX, 0, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 2, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 4, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 6, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 8, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 10, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 12, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 14, X8, X9, X10, X11) + RET + +// func mixBlocksSSE2(out, a, b, c *block) +TEXT ·mixBlocksSSE2(SB), 4, $0-32 + MOVQ out+0(FP), DX + MOVQ a+8(FP), AX + MOVQ b+16(FP), BX + MOVQ a+24(FP), CX + MOVQ $128, BP + +loop: + MOVOU 0(AX), X0 + MOVOU 0(BX), X1 + MOVOU 0(CX), X2 + PXOR X1, X0 + PXOR X2, X0 + MOVOU X0, 0(DX) + ADDQ $16, AX + ADDQ $16, BX + ADDQ $16, CX + ADDQ $16, DX + SUBQ $2, BP + JA loop + RET + +// func xorBlocksSSE2(out, a, b, c 
*block) +TEXT ·xorBlocksSSE2(SB), 4, $0-32 + MOVQ out+0(FP), DX + MOVQ a+8(FP), AX + MOVQ b+16(FP), BX + MOVQ a+24(FP), CX + MOVQ $128, BP + +loop: + MOVOU 0(AX), X0 + MOVOU 0(BX), X1 + MOVOU 0(CX), X2 + MOVOU 0(DX), X3 + PXOR X1, X0 + PXOR X2, X0 + PXOR X3, X0 + MOVOU X0, 0(DX) + ADDQ $16, AX + ADDQ $16, BX + ADDQ $16, CX + ADDQ $16, DX + SUBQ $2, BP + JA loop + RET diff --git a/vendor/golang.org/x/crypto/argon2/blamka_generic.go b/vendor/golang.org/x/crypto/argon2/blamka_generic.go new file mode 100644 index 00000000000..a481b2243f8 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_generic.go @@ -0,0 +1,163 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package argon2 + +var useSSE4 bool + +func processBlockGeneric(out, in1, in2 *block, xor bool) { + var t block + for i := range t { + t[i] = in1[i] ^ in2[i] + } + for i := 0; i < blockLength; i += 16 { + blamkaGeneric( + &t[i+0], &t[i+1], &t[i+2], &t[i+3], + &t[i+4], &t[i+5], &t[i+6], &t[i+7], + &t[i+8], &t[i+9], &t[i+10], &t[i+11], + &t[i+12], &t[i+13], &t[i+14], &t[i+15], + ) + } + for i := 0; i < blockLength/8; i += 2 { + blamkaGeneric( + &t[i], &t[i+1], &t[16+i], &t[16+i+1], + &t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1], + &t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1], + &t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1], + ) + } + if xor { + for i := range t { + out[i] ^= in1[i] ^ in2[i] ^ t[i] + } + } else { + for i := range t { + out[i] = in1[i] ^ in2[i] ^ t[i] + } + } +} + +func blamkaGeneric(t00, t01, t02, t03, t04, t05, t06, t07, t08, t09, t10, t11, t12, t13, t14, t15 *uint64) { + v00, v01, v02, v03 := *t00, *t01, *t02, *t03 + v04, v05, v06, v07 := *t04, *t05, *t06, *t07 + v08, v09, v10, v11 := *t08, *t09, *t10, *t11 + v12, v13, v14, v15 := *t12, *t13, *t14, *t15 + + v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04)) + v12 ^= v00 + v12 = v12>>32 | v12<<32 + v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12)) + v04 ^= v08 + v04 = v04>>24 | v04<<40 + + v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04)) + v12 ^= v00 + v12 = v12>>16 | v12<<48 + v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12)) + v04 ^= v08 + v04 = v04>>63 | v04<<1 + + v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05)) + v13 ^= v01 + v13 = v13>>32 | v13<<32 + v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13)) + v05 ^= v09 + v05 = v05>>24 | v05<<40 + + v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05)) + v13 ^= v01 + v13 = v13>>16 | v13<<48 + v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13)) + v05 ^= v09 + v05 = v05>>63 | v05<<1 + + v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06)) + v14 ^= v02 + v14 = v14>>32 | v14<<32 + v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14)) + v06 ^= v10 + v06 = v06>>24 | v06<<40 + + v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06)) + v14 ^= v02 + v14 = v14>>16 | v14<<48 + v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14)) + v06 ^= v10 + v06 = v06>>63 | v06<<1 + + v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07)) + v15 ^= v03 + v15 = v15>>32 | v15<<32 + v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15)) + v07 ^= v11 + v07 = v07>>24 | v07<<40 + + v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07)) + v15 ^= v03 + v15 = v15>>16 | v15<<48 + v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15)) + v07 ^= v11 + v07 = v07>>63 | v07<<1 + + v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05)) + v15 ^= v00 + v15 = v15>>32 | v15<<32 + v10 += v15 + 
2*uint64(uint32(v10))*uint64(uint32(v15)) + v05 ^= v10 + v05 = v05>>24 | v05<<40 + + v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05)) + v15 ^= v00 + v15 = v15>>16 | v15<<48 + v10 += v15 + 2*uint64(uint32(v10))*uint64(uint32(v15)) + v05 ^= v10 + v05 = v05>>63 | v05<<1 + + v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06)) + v12 ^= v01 + v12 = v12>>32 | v12<<32 + v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12)) + v06 ^= v11 + v06 = v06>>24 | v06<<40 + + v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06)) + v12 ^= v01 + v12 = v12>>16 | v12<<48 + v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12)) + v06 ^= v11 + v06 = v06>>63 | v06<<1 + + v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07)) + v13 ^= v02 + v13 = v13>>32 | v13<<32 + v08 += v13 + 2*uint64(uint32(v08))*uint64(uint32(v13)) + v07 ^= v08 + v07 = v07>>24 | v07<<40 + + v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07)) + v13 ^= v02 + v13 = v13>>16 | v13<<48 + v08 += v13 + 2*uint64(uint32(v08))*uint64(uint32(v13)) + v07 ^= v08 + v07 = v07>>63 | v07<<1 + + v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04)) + v14 ^= v03 + v14 = v14>>32 | v14<<32 + v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14)) + v04 ^= v09 + v04 = v04>>24 | v04<<40 + + v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04)) + v14 ^= v03 + v14 = v14>>16 | v14<<48 + v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14)) + v04 ^= v09 + v04 = v04>>63 | v04<<1 + + *t00, *t01, *t02, *t03 = v00, v01, v02, v03 + *t04, *t05, *t06, *t07 = v04, v05, v06, v07 + *t08, *t09, *t10, *t11 = v08, v09, v10, v11 + *t12, *t13, *t14, *t15 = v12, v13, v14, v15 +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_ref.go b/vendor/golang.org/x/crypto/argon2/blamka_ref.go new file mode 100644 index 00000000000..167c59d2d5a --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_ref.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || purego || !gc +// +build !amd64 purego !gc + +package argon2 + +func processBlock(out, in1, in2 *block) { + processBlockGeneric(out, in1, in2, false) +} + +func processBlockXOR(out, in1, in2 *block) { + processBlockGeneric(out, in1, in2, true) +} diff --git a/vendor/golang.org/x/crypto/hkdf/hkdf.go b/vendor/golang.org/x/crypto/hkdf/hkdf.go new file mode 100644 index 00000000000..dda3f143bec --- /dev/null +++ b/vendor/golang.org/x/crypto/hkdf/hkdf.go @@ -0,0 +1,93 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package hkdf implements the HMAC-based Extract-and-Expand Key Derivation +// Function (HKDF) as defined in RFC 5869. +// +// HKDF is a cryptographic key derivation function (KDF) with the goal of +// expanding limited input keying material into one or more cryptographically +// strong secret keys. +package hkdf // import "golang.org/x/crypto/hkdf" + +import ( + "crypto/hmac" + "errors" + "hash" + "io" +) + +// Extract generates a pseudorandom key for use with Expand from an input secret +// and an optional independent salt. +// +// Only use this function if you need to reuse the extracted key with multiple +// Expand invocations and different context values. Most common scenarios, +// including the generation of multiple keys, should use New instead. 
+func Extract(hash func() hash.Hash, secret, salt []byte) []byte { + if salt == nil { + salt = make([]byte, hash().Size()) + } + extractor := hmac.New(hash, salt) + extractor.Write(secret) + return extractor.Sum(nil) +} + +type hkdf struct { + expander hash.Hash + size int + + info []byte + counter byte + + prev []byte + buf []byte +} + +func (f *hkdf) Read(p []byte) (int, error) { + // Check whether enough data can be generated + need := len(p) + remains := len(f.buf) + int(255-f.counter+1)*f.size + if remains < need { + return 0, errors.New("hkdf: entropy limit reached") + } + // Read any leftover from the buffer + n := copy(p, f.buf) + p = p[n:] + + // Fill the rest of the buffer + for len(p) > 0 { + f.expander.Reset() + f.expander.Write(f.prev) + f.expander.Write(f.info) + f.expander.Write([]byte{f.counter}) + f.prev = f.expander.Sum(f.prev[:0]) + f.counter++ + + // Copy the new batch into p + f.buf = f.prev + n = copy(p, f.buf) + p = p[n:] + } + // Save leftovers for next run + f.buf = f.buf[n:] + + return need, nil +} + +// Expand returns a Reader, from which keys can be read, using the given +// pseudorandom key and optional context info, skipping the extraction step. +// +// The pseudorandomKey should have been generated by Extract, or be a uniformly +// random or pseudorandom cryptographically strong key. See RFC 5869, Section +// 3.3. Most common scenarios will want to use New instead. +func Expand(hash func() hash.Hash, pseudorandomKey, info []byte) io.Reader { + expander := hmac.New(hash, pseudorandomKey) + return &hkdf{expander, expander.Size(), info, 1, nil, nil} +} + +// New returns a Reader, from which keys can be read, using the given hash, +// secret, salt and context info. Salt and info can be nil. +func New(hash func() hash.Hash, secret, salt, info []byte) io.Reader { + prk := Extract(hash, secret, salt) + return Expand(hash, prk, info) +} From 13073f07138a3f2be8eaa681371c27e1ffe0b86a Mon Sep 17 00:00:00 2001 From: Marcel Lauhoff Date: Tue, 19 Jul 2022 16:31:35 +0200 Subject: [PATCH 10/35] fscrypt: Unlock: Fetch keys early Fetch keys from KMS before doing anything else. This will catch KMS errors before setting up any fscrypt metadata. Signed-off-by: Marcel Lauhoff --- internal/util/fscrypt/fscrypt.go | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/internal/util/fscrypt/fscrypt.go b/internal/util/fscrypt/fscrypt.go index d339258ed3e..53ec08ced7f 100644 --- a/internal/util/fscrypt/fscrypt.go +++ b/internal/util/fscrypt/fscrypt.go @@ -294,9 +294,17 @@ func Unlock( volEncryption *util.VolumeEncryption, stagingTargetPath string, volID string, ) error { + // Fetches keys from KMS. Do this first to catch KMS errors before setting up anything. 
+ keyFn, err := createKeyFuncFromVolumeEncryption(ctx, *volEncryption, volID) + if err != nil { + log.ErrorLog(ctx, "fscrypt: could not create key function: %v", err) + + return err + } + fscryptContext, err := fscryptactions.NewContextFromMountpoint(stagingTargetPath, nil) if err != nil { - log.ErrorLog(ctx, "fscrypt: failed to create context from mountpoint %v: %w", stagingTargetPath) + log.ErrorLog(ctx, "fscrypt: failed to create context from mountpoint %v: %w", stagingTargetPath, err) return err } @@ -318,7 +326,7 @@ func Unlock( if err = fscryptContext.Mount.Setup(0o755); err != nil { alreadySetupErr := &fscryptfilesystem.ErrAlreadySetup{} if errors.As(err, &alreadySetupErr) { - log.DebugLog(ctx, "fscrypt: metadata directory %q already set up", alreadySetupErr.Mount.Path) + log.DebugLog(ctx, "fscrypt: metadata directory in %q already set up", alreadySetupErr.Mount.Path) metadataDirExists = true } else { log.ErrorLog(ctx, "fscrypt: mount setup failed: %v", err) @@ -339,13 +347,6 @@ func Unlock( metadataDirExists, kernelPolicyExists) } - keyFn, err := createKeyFuncFromVolumeEncryption(ctx, *volEncryption, volID) - if err != nil { - log.ErrorLog(ctx, "fscrypt: could not create key function: %v", err) - - return err - } - protectorName := fmt.Sprintf("%s-%s", FscryptProtectorPrefix, volEncryption.GetID()) switch volEncryption.KMS.RequiresDEKStore() { From 3d9cd6594758075909718beaca7f05fc2c69b917 Mon Sep 17 00:00:00 2001 From: Marcel Lauhoff Date: Tue, 19 Jul 2022 16:57:26 +0200 Subject: [PATCH 11/35] fscrypt: Fetch passphrase when keyFn is invoked not created Fetch password when keyFn is invoked, not when it is created. This allows creation of the keyFn before actually creating the passphrase. Signed-off-by: Marcel Lauhoff --- internal/util/fscrypt/fscrypt.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/internal/util/fscrypt/fscrypt.go b/internal/util/fscrypt/fscrypt.go index 53ec08ced7f..53c5f99c929 100644 --- a/internal/util/fscrypt/fscrypt.go +++ b/internal/util/fscrypt/fscrypt.go @@ -85,13 +85,13 @@ func createKeyFuncFromVolumeEncryption( encryption util.VolumeEncryption, volID string, ) (func(fscryptactions.ProtectorInfo, bool) (*fscryptcrypto.Key, error), error) { - passphrase, err := getPassphrase(ctx, encryption, volID) - if err != nil { - return nil, err - } - keyFunc := func(info fscryptactions.ProtectorInfo, retry bool) (*fscryptcrypto.Key, error) { - key, err := fscryptcrypto.NewBlankKey(32) + passphrase, err := getPassphrase(ctx, encryption, volID) + if err != nil { + return nil, err + } + + key, err := fscryptcrypto.NewBlankKey(encryptionPassphraseSize / 2) copy(key.Data(), passphrase) return key, err From e165c9eb0cf8e08f7f0bdc6ec6f0a8b94f06fc9b Mon Sep 17 00:00:00 2001 From: Marcel Lauhoff Date: Fri, 12 Aug 2022 17:06:48 +0200 Subject: [PATCH 12/35] fscrypt: Determine best supported fscrypt policy on node init Currently fscrypt supports policies version 1 and 2. 2 is the best choice and was the only choice prior to this commit. This adds support for kernels < 5.4, by selecting policy version 1 there. 
Signed-off-by: Marcel Lauhoff --- internal/util/fscrypt/fscrypt.go | 33 +++++++++++++++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/internal/util/fscrypt/fscrypt.go b/internal/util/fscrypt/fscrypt.go index 53c5f99c929..6a3bf9c4073 100644 --- a/internal/util/fscrypt/fscrypt.go +++ b/internal/util/fscrypt/fscrypt.go @@ -47,6 +47,17 @@ const ( encryptionPassphraseSize = 64 ) +var policyV2Support = []util.KernelVersion{ + { + Version: 5, + PatchLevel: 4, + SubLevel: 0, + ExtraVersion: 0, + Distribution: "", + Backport: false, + }, +} + func AppendEncyptedSubdirectory(dir string) string { return path.Join(dir, FscryptSubdir) } @@ -266,10 +277,30 @@ func IsDirectoryUnlocked(directoryPath, filesystem string) error { return nil } +func getBestPolicyVersion() (int64, error) { + // fetch the current running kernel info + release, err := util.GetKernelVersion() + if err != nil { + return 0, fmt.Errorf("fetching current kernel version failed: %w", err) + } + + switch { + case util.CheckKernelSupport(release, policyV2Support): + return 2, nil + default: + return 1, nil + } +} + // InitializeNode performs once per nodeserver initialization // required by the fscrypt library. Creates /etc/fscrypt.conf. func InitializeNode(ctx context.Context) error { - err := fscryptactions.CreateConfigFile(FscryptHashingTimeTarget, 2) + policyVersion, err := getBestPolicyVersion() + if err != nil { + return fmt.Errorf("fscrypt node init failed to determine best policy version: %w", err) + } + + err = fscryptactions.CreateConfigFile(FscryptHashingTimeTarget, policyVersion) if err != nil { existsError := &fscryptactions.ErrConfigFileExists{} if errors.As(err, &existsError) { From 1d781fd27ba86d98a07d864e483e4ba868483b3c Mon Sep 17 00:00:00 2001 From: Marcel Lauhoff Date: Tue, 2 Aug 2022 16:09:24 +0200 Subject: [PATCH 13/35] fscrypt: Update mount info before create context NewContextFrom{Mountpoint,Path} functions use cached `/proc/self/mountinfo` to find mounted file systems by device ID. Since we run fscrypt as a library in a long-lived process the cached information is likely to be stale. Stale entries may map device IDs to mount points of already destroyed RBDs and fail context creation. Updating the cache beforehand prevents this. Signed-off-by: Marcel Lauhoff --- internal/util/fscrypt/fscrypt.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/internal/util/fscrypt/fscrypt.go b/internal/util/fscrypt/fscrypt.go index 6a3bf9c4073..07bce6a41bb 100644 --- a/internal/util/fscrypt/fscrypt.go +++ b/internal/util/fscrypt/fscrypt.go @@ -333,6 +333,11 @@ func Unlock( return err } + err = fscryptfilesystem.UpdateMountInfo() + if err != nil { + return err + } + fscryptContext, err := fscryptactions.NewContextFromMountpoint(stagingTargetPath, nil) if err != nil { log.ErrorLog(ctx, "fscrypt: failed to create context from mountpoint %v: %w", stagingTargetPath, err) From bfd397dd0e9d41f1ca9517f5fa6f322cb803062a Mon Sep 17 00:00:00 2001 From: Marcel Lauhoff Date: Tue, 9 Aug 2022 22:57:21 +0200 Subject: [PATCH 14/35] fscrypt: Use constant protector name Use constant protector name 'ceph-csi' instead of constant prefix concatenated with the volume ID. 
When cloning volumes the ID changes and fscrypt protected directories become inunlockable due to the protector name change Signed-off-by: Marcel Lauhoff --- internal/util/fscrypt/fscrypt.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/util/fscrypt/fscrypt.go b/internal/util/fscrypt/fscrypt.go index 07bce6a41bb..9ca822d67bd 100644 --- a/internal/util/fscrypt/fscrypt.go +++ b/internal/util/fscrypt/fscrypt.go @@ -383,7 +383,7 @@ func Unlock( metadataDirExists, kernelPolicyExists) } - protectorName := fmt.Sprintf("%s-%s", FscryptProtectorPrefix, volEncryption.GetID()) + protectorName := FscryptProtectorPrefix switch volEncryption.KMS.RequiresDEKStore() { case kms.DEKStoreMetadata: From c63133e7e94cbc07f8255071c522824735f73c95 Mon Sep 17 00:00:00 2001 From: Marcel Lauhoff Date: Fri, 12 Aug 2022 15:05:02 +0200 Subject: [PATCH 15/35] fscrypt: fsync encrypted dir after setting policy [workaround] Revert once our google/fscrypt dependency is upgraded to a version that includes https://github.com/google/fscrypt/pull/359 gets accepted Signed-off-by: Marcel Lauhoff --- internal/util/fscrypt/fscrypt.go | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/internal/util/fscrypt/fscrypt.go b/internal/util/fscrypt/fscrypt.go index 9ca822d67bd..3d35b142c9e 100644 --- a/internal/util/fscrypt/fscrypt.go +++ b/internal/util/fscrypt/fscrypt.go @@ -111,6 +111,20 @@ func createKeyFuncFromVolumeEncryption( return keyFunc, nil } +// fsyncEncryptedDirectory calls sync on dirPath. It is intended to +// work around the fscrypt library not syncing the directory it sets a +// policy on. +// TODO Remove when the fscrypt dependency has https://github.com/google/fscrypt/pull/359 +func fsyncEncryptedDirectory(dirPath string) error { + dir, err := os.Open(dirPath) + if err != nil { + return err + } + defer dir.Close() + + return dir.Sync() +} + // unlockExisting tries to unlock an already set up fscrypt directory using keys from Ceph CSI. func unlockExisting( ctx context.Context, @@ -225,6 +239,12 @@ func initializeAndUnlock( return err } + if err = fsyncEncryptedDirectory(encryptedPath); err != nil { + log.ErrorLog(ctx, "fscrypt: fsync encrypted dir - to flush kernel policy to disk failed %v", err) + + return err + } + return nil } From 802d766c09d9053c8fec0a96750d63941c9decad Mon Sep 17 00:00:00 2001 From: Marcel Lauhoff Date: Wed, 7 Sep 2022 17:32:10 +0200 Subject: [PATCH 16/35] fscrypt: fix metadata directory permissions Call Mount.Setup with SingleUserWritable constant instead of 0o755, which is silently ignored and causes the /.fscrypt/{policy,protector}/ directories to have mode 000. Signed-off-by: Marcel Lauhoff --- internal/util/fscrypt/fscrypt.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/util/fscrypt/fscrypt.go b/internal/util/fscrypt/fscrypt.go index 3d35b142c9e..6f87b69d67d 100644 --- a/internal/util/fscrypt/fscrypt.go +++ b/internal/util/fscrypt/fscrypt.go @@ -379,7 +379,7 @@ func Unlock( // 1. Do we have a metadata directory (.fscrypt) set up? 
metadataDirExists := false - if err = fscryptContext.Mount.Setup(0o755); err != nil { + if err = fscryptContext.Mount.Setup(fscryptfilesystem.SingleUserWritable); err != nil { alreadySetupErr := &fscryptfilesystem.ErrAlreadySetup{} if errors.As(err, &alreadySetupErr) { log.DebugLog(ctx, "fscrypt: metadata directory in %q already set up", alreadySetupErr.Mount.Path) From 65c8787ead0ce0bece49d93c55085c63856b4c85 Mon Sep 17 00:00:00 2001 From: Marcel Lauhoff Date: Fri, 27 May 2022 21:55:58 +0200 Subject: [PATCH 17/35] rbd: fscrypt file encryption support Integrate basic fscrypt functionality into RBD initialization. To activate file encryption instead of block introduce the new 'encryptionType' storage class key. Signed-off-by: Marcel Lauhoff --- internal/rbd/encryption.go | 127 +++++++++++++++++++++++++++++------- internal/rbd/nodeserver.go | 35 +++++++++- internal/rbd/rbd_journal.go | 2 +- internal/rbd/rbd_util.go | 22 +++++-- 4 files changed, 154 insertions(+), 32 deletions(-) diff --git a/internal/rbd/encryption.go b/internal/rbd/encryption.go index 60522504f2e..83da9672708 100644 --- a/internal/rbd/encryption.go +++ b/internal/rbd/encryption.go @@ -63,6 +63,11 @@ const ( oldMetadataDEK = ".rbd.csi.ceph.com/dek" encryptionPassphraseSize = 20 + + // rbdDefaultEncryptionType is the default to use when the + // user did not specify an "encryptionType", but set + // "encryption": true. + rbdDefaultEncryptionType = util.EncryptionTypeBlock ) // checkRbdImageEncrypted verifies if rbd image was encrypted when created. @@ -98,11 +103,20 @@ func (ri *rbdImage) isBlockEncrypted() bool { return ri.blockEncryption != nil } -// isBlockDeviceEncrypted returns `true` if the filesystem on the rbdImage is (or needs to be) encrypted. +// isFileEncrypted returns `true` if the filesystem on the rbdImage is (or needs to be) encrypted. func (ri *rbdImage) isFileEncrypted() bool { return ri.fileEncryption != nil } +func IsFileEncrypted(ctx context.Context, volOptions map[string]string) (bool, error) { + _, encType, err := ParseEncryptionOpts(ctx, volOptions) + if err != nil { + return false, err + } + + return encType == util.EncryptionTypeFile, nil +} + // setupBlockEncryption configures the metadata of the RBD image for encryption: // - the Data-Encryption-Key (DEK) will be generated stored for use by the KMS; // - the RBD image will be marked to support encryption in its metadata. @@ -137,7 +151,7 @@ func (ri *rbdImage) setupBlockEncryption(ctx context.Context) error { // (Usecase: Restoring snapshot into a storageclass with different encryption config). func (ri *rbdImage) copyEncryptionConfig(cp *rbdImage, copyOnlyPassphrase bool) error { // nothing to do if parent image is not encrypted. - if !ri.isBlockEncrypted() { + if !ri.isBlockEncrypted() && !ri.isFileEncrypted() { return nil } @@ -146,25 +160,54 @@ func (ri *rbdImage) copyEncryptionConfig(cp *rbdImage, copyOnlyPassphrase bool) "set!? 
Call stack: %s", ri, cp, ri.VolID, util.CallStack()) } - // get the unencrypted passphrase - passphrase, err := ri.blockEncryption.GetCryptoPassphrase(ri.VolID) - if err != nil { - return fmt.Errorf("failed to fetch passphrase for %q: %w", - ri, err) + if ri.isBlockEncrypted() { + // get the unencrypted passphrase + passphrase, err := ri.blockEncryption.GetCryptoPassphrase(ri.VolID) + if err != nil { + return fmt.Errorf("failed to fetch passphrase for %q: %w", + ri, err) + } + + if !copyOnlyPassphrase { + cp.blockEncryption, err = util.NewVolumeEncryption(ri.blockEncryption.GetID(), ri.blockEncryption.KMS) + if errors.Is(err, util.ErrDEKStoreNeeded) { + cp.blockEncryption.SetDEKStore(cp) + } + } + + // re-encrypt the plain passphrase for the cloned volume + err = cp.blockEncryption.StoreCryptoPassphrase(cp.VolID, passphrase) + if err != nil { + return fmt.Errorf("failed to store passphrase for %q: %w", + cp, err) + } } - if !copyOnlyPassphrase { - cp.blockEncryption, err = util.NewVolumeEncryption(ri.blockEncryption.GetID(), ri.blockEncryption.KMS) + if ri.isFileEncrypted() && !copyOnlyPassphrase { + var err error + cp.fileEncryption, err = util.NewVolumeEncryption(ri.fileEncryption.GetID(), ri.fileEncryption.KMS) if errors.Is(err, util.ErrDEKStoreNeeded) { - cp.blockEncryption.SetDEKStore(cp) + _, err := ri.fileEncryption.KMS.GetSecret("") + if errors.Is(err, kmsapi.ErrGetSecretUnsupported) { + return err + } } } - // re-encrypt the plain passphrase for the cloned volume - err = cp.blockEncryption.StoreCryptoPassphrase(cp.VolID, passphrase) - if err != nil { - return fmt.Errorf("failed to store passphrase for %q: %w", - cp, err) + if ri.isFileEncrypted() && ri.fileEncryption.KMS.RequiresDEKStore() == kmsapi.DEKStoreIntegrated { + // get the unencrypted passphrase + passphrase, err := ri.fileEncryption.GetCryptoPassphrase(ri.VolID) + if err != nil { + return fmt.Errorf("failed to fetch passphrase for %q: %w", + ri, err) + } + + // re-encrypt the plain passphrase for the cloned volume + err = cp.fileEncryption.StoreCryptoPassphrase(cp.VolID, passphrase) + if err != nil { + return fmt.Errorf("failed to store passphrase for %q: %w", + cp, err) + } } // copy encryption status for the original volume @@ -173,6 +216,7 @@ func (ri *rbdImage) copyEncryptionConfig(cp *rbdImage, copyOnlyPassphrase bool) return fmt.Errorf("failed to get encryption status for %q: %w", ri, err) } + err = cp.ensureEncryptionMetadataSet(status) if err != nil { return fmt.Errorf("failed to store encryption status for %q: "+ @@ -185,12 +229,12 @@ func (ri *rbdImage) copyEncryptionConfig(cp *rbdImage, copyOnlyPassphrase bool) // repairEncryptionConfig checks the encryption state of the current rbdImage, // and makes sure that the destination rbdImage has the same configuration. 
func (ri *rbdImage) repairEncryptionConfig(dest *rbdImage) error { - if !ri.isBlockEncrypted() { + if !ri.isBlockEncrypted() && !ri.isFileEncrypted() { return nil } // if ri is encrypted, copy its configuration in case it is missing - if !dest.isBlockEncrypted() { + if !dest.isBlockEncrypted() && !dest.isFileEncrypted() { // dest needs to be connected to the cluster, otherwise it will // not be possible to write any metadata if dest.conn == nil { @@ -262,14 +306,22 @@ func (rv *rbdVolume) openEncryptedDevice(ctx context.Context, devicePath string) } func (ri *rbdImage) initKMS(ctx context.Context, volOptions, credentials map[string]string) error { - kmsID, err := ri.ParseEncryptionOpts(ctx, volOptions) + kmsID, encType, err := ParseEncryptionOpts(ctx, volOptions) if err != nil { return err } else if kmsID == "" { return nil } - err = ri.configureBlockDeviceEncryption(kmsID, credentials) + switch encType { + case util.EncryptionTypeBlock: + err = ri.configureBlockEncryption(kmsID, credentials) + case util.EncryptionTypeFile: + err = ri.configureFileEncryption(kmsID, credentials) + case util.EncryptionTypeInvalid: + return fmt.Errorf("invalid encryption type") + } + if err != nil { return fmt.Errorf("invalid encryption kms configuration: %w", err) } @@ -278,10 +330,10 @@ func (ri *rbdImage) initKMS(ctx context.Context, volOptions, credentials map[str } // ParseEncryptionOpts returns kmsID and sets Owner attribute. -func (ri *rbdImage) ParseEncryptionOpts( +func ParseEncryptionOpts( ctx context.Context, volOptions map[string]string, -) (string, error) { +) (string, util.EncryptionType, error) { var ( err error ok bool @@ -289,14 +341,16 @@ func (ri *rbdImage) ParseEncryptionOpts( ) encrypted, ok = volOptions["encrypted"] if !ok { - return "", nil + return "", util.EncryptionTypeInvalid, err } kmsID, err = util.FetchEncryptionKMSID(encrypted, volOptions["encryptionKMSID"]) if err != nil { - return "", err + return "", util.EncryptionTypeInvalid, err } - return kmsID, nil + encType := util.FetchEncryptionType(volOptions, rbdDefaultEncryptionType) + + return kmsID, encType, nil } // configureBlockDeviceEncryption sets up the VolumeEncryption for this rbdImage. Once @@ -318,6 +372,31 @@ func (ri *rbdImage) configureBlockEncryption(kmsID string, credentials map[strin return nil } +// configureBlockDeviceEncryption sets up the VolumeEncryption for this rbdImage. Once +// configured, use isEncrypted() to see if the volume supports encryption. +func (ri *rbdImage) configureFileEncryption(kmsID string, credentials map[string]string) error { + kms, err := kmsapi.GetKMS(ri.Owner, kmsID, credentials) + if err != nil { + return err + } + + ri.fileEncryption, err = util.NewVolumeEncryption(kmsID, kms) + + if errors.Is(err, util.ErrDEKStoreNeeded) { + // fscrypt uses secrets directly from the KMS. + // Therefore we do not support an additional DEK + // store. Since not all "metadata" KMS support + // GetSecret, test for support here. Postpone any + // other error handling + _, err := ri.fileEncryption.KMS.GetSecret("") + if errors.Is(err, kmsapi.ErrGetSecretUnsupported) { + return err + } + } + + return nil +} + // StoreDEK saves the DEK in the metadata, overwrites any existing contents. 
func (ri *rbdImage) StoreDEK(volumeID, dek string) error { if ri.VolID == "" { diff --git a/internal/rbd/nodeserver.go b/internal/rbd/nodeserver.go index 13ae406633f..3a46625f6d0 100644 --- a/internal/rbd/nodeserver.go +++ b/internal/rbd/nodeserver.go @@ -27,6 +27,7 @@ import ( csicommon "github.com/ceph/ceph-csi/internal/csi-common" "github.com/ceph/ceph-csi/internal/journal" "github.com/ceph/ceph-csi/internal/util" + "github.com/ceph/ceph-csi/internal/util/fscrypt" "github.com/ceph/ceph-csi/internal/util/log" librbd "github.com/ceph/go-ceph/rbd" @@ -433,6 +434,12 @@ func (ns *NodeServer) stageTransaction( transaction.isBlockEncrypted = true } + if volOptions.isFileEncrypted() { + if err = fscrypt.InitializeNode(ctx); err != nil { + return transaction, err + } + } + stagingTargetPath := getStagingTargetPath(req) isBlock := req.GetVolumeCapability().GetBlock() != nil @@ -444,12 +451,20 @@ func (ns *NodeServer) stageTransaction( transaction.isStagePathCreated = true // nodeStage Path - err = ns.mountVolumeToStagePath(ctx, req, staticVol, stagingTargetPath, devicePath) + err = ns.mountVolumeToStagePath(ctx, req, staticVol, stagingTargetPath, devicePath, volOptions.isFileEncrypted()) if err != nil { return transaction, err } transaction.isMounted = true + if volOptions.isFileEncrypted() { + log.DebugLog(ctx, "rbd fscrypt: trying to unlock filesystem on %s image %q", stagingTargetPath, volOptions.VolID) + err = fscrypt.Unlock(ctx, volOptions.fileEncryption, stagingTargetPath, volOptions.VolID) + if err != nil { + return transaction, err + } + } + // As we are supporting the restore of a volume to a bigger size and // creating bigger size clone from a volume, we need to check filesystem // resize is required, if required resize filesystem. @@ -691,6 +706,17 @@ func (ns *NodeServer) NodePublishVolume( return &csi.NodePublishVolumeResponse{}, nil } + fileEncrypted, err := IsFileEncrypted(ctx, req.GetVolumeContext()) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + if fileEncrypted { + stagingPath = fscrypt.AppendEncyptedSubdirectory(stagingPath) + if err = fscrypt.IsDirectoryUnlocked(stagingPath, req.GetVolumeCapability().GetMount().GetFsType()); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + } + // Publish Path err = ns.mountVolume(ctx, stagingPath, req) if err != nil { @@ -707,6 +733,7 @@ func (ns *NodeServer) mountVolumeToStagePath( req *csi.NodeStageVolumeRequest, staticVol bool, stagingPath, devicePath string, + fileEncryption bool, ) error { readOnly := false fsType := req.GetVolumeCapability().GetMount().GetFsType() @@ -751,7 +778,11 @@ func (ns *NodeServer) mountVolumeToStagePath( args := []string{} switch fsType { case "ext4": - args = []string{"-m0", "-Enodiscard,lazy_itable_init=1,lazy_journal_init=1", devicePath} + args = []string{"-m0", "-Enodiscard,lazy_itable_init=1,lazy_journal_init=1"} + if fileEncryption { + args = append(args, "-Oencrypt") + } + args = append(args, devicePath) case "xfs": args = []string{"-K", devicePath} // always disable reflink diff --git a/internal/rbd/rbd_journal.go b/internal/rbd/rbd_journal.go index e0e9914643a..d470929940f 100644 --- a/internal/rbd/rbd_journal.go +++ b/internal/rbd/rbd_journal.go @@ -568,7 +568,7 @@ func RegenerateJournal( rbdVol.Owner = owner - kmsID, err = rbdVol.ParseEncryptionOpts(ctx, volumeAttributes) + kmsID, _, err = ParseEncryptionOpts(ctx, volumeAttributes) if err != nil { return "", err } diff --git a/internal/rbd/rbd_util.go b/internal/rbd/rbd_util.go index 
ff2e682b413..e33f7252300 100644 --- a/internal/rbd/rbd_util.go +++ b/internal/rbd/rbd_util.go @@ -137,7 +137,7 @@ type rbdImage struct { // fileEncryption provides access to optional VolumeEncryption functions (e.g fscrypt) fileEncryption *util.VolumeEncryption - CreatedAt *timestamp.Timestamp + CreatedAt *timestamp.Timestamp // conn is a connection to the Ceph cluster obtained from a ConnPool conn *util.ClusterConnection @@ -393,6 +393,9 @@ func (ri *rbdImage) Destroy() { if ri.isBlockEncrypted() { ri.blockEncryption.Destroy() } + if ri.isFileEncrypted() { + ri.fileEncryption.Destroy() + } } // String returns the image-spec (pool/{namespace/}image) format of the image. @@ -631,9 +634,16 @@ func (ri *rbdImage) deleteImage(ctx context.Context) error { } if ri.isBlockEncrypted() { - log.DebugLog(ctx, "rbd: going to remove DEK for %q", ri) + log.DebugLog(ctx, "rbd: going to remove DEK for %q (block encryption)", ri) if err = ri.blockEncryption.RemoveDEK(ri.VolID); err != nil { - log.WarningLog(ctx, "failed to clean the passphrase for volume %s: %s", ri.VolID, err) + log.WarningLog(ctx, "failed to clean the passphrase for volume %s (block encryption): %s", ri.VolID, err) + } + } + + if ri.isFileEncrypted() { + log.DebugLog(ctx, "rbd: going to remove DEK for %q (file encryption)", ri) + if err = ri.fileEncryption.RemoveDEK(ri.VolID); err != nil { + log.WarningLog(ctx, "failed to clean the passphrase for volume %s (file encryption): %s", ri.VolID, err) } } @@ -1967,11 +1977,13 @@ func (ri *rbdImage) getOrigSnapName(snapID uint64) (string, error) { } func (ri *rbdImage) isCompatibleEncryption(dst *rbdImage) error { + riEncrypted := ri.isBlockEncrypted() || ri.isFileEncrypted() + dstEncrypted := dst.isBlockEncrypted() || dst.isFileEncrypted() switch { - case ri.isBlockEncrypted() && !dst.isBlockEncrypted(): + case riEncrypted && !dstEncrypted: return fmt.Errorf("cannot create unencrypted volume from encrypted volume %q", ri) - case !ri.isBlockEncrypted() && dst.isBlockEncrypted(): + case !riEncrypted && dstEncrypted: return fmt.Errorf("cannot create encrypted volume from unencrypted volume %q", ri) } From d8e94c36c5c4a6128a95c43107f498f9a0cb87a8 Mon Sep 17 00:00:00 2001 From: Marcel Lauhoff Date: Thu, 4 Aug 2022 17:15:52 +0200 Subject: [PATCH 18/35] rbd: Handle encryption type default at a more meaningful place Different places have different meaningful fallback. When parsing from user we should default to block, when parsing stored config we should default to invalid and handle that as an error. 
Signed-off-by: Marcel Lauhoff --- internal/rbd/encryption.go | 7 ++++--- internal/rbd/rbd_journal.go | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/internal/rbd/encryption.go b/internal/rbd/encryption.go index 83da9672708..c7211328819 100644 --- a/internal/rbd/encryption.go +++ b/internal/rbd/encryption.go @@ -109,7 +109,7 @@ func (ri *rbdImage) isFileEncrypted() bool { } func IsFileEncrypted(ctx context.Context, volOptions map[string]string) (bool, error) { - _, encType, err := ParseEncryptionOpts(ctx, volOptions) + _, encType, err := ParseEncryptionOpts(ctx, volOptions, util.EncryptionTypeInvalid) if err != nil { return false, err } @@ -306,7 +306,7 @@ func (rv *rbdVolume) openEncryptedDevice(ctx context.Context, devicePath string) } func (ri *rbdImage) initKMS(ctx context.Context, volOptions, credentials map[string]string) error { - kmsID, encType, err := ParseEncryptionOpts(ctx, volOptions) + kmsID, encType, err := ParseEncryptionOpts(ctx, volOptions, rbdDefaultEncryptionType) if err != nil { return err } else if kmsID == "" { @@ -333,6 +333,7 @@ func (ri *rbdImage) initKMS(ctx context.Context, volOptions, credentials map[str func ParseEncryptionOpts( ctx context.Context, volOptions map[string]string, + fallbackEncType util.EncryptionType, ) (string, util.EncryptionType, error) { var ( err error @@ -348,7 +349,7 @@ func ParseEncryptionOpts( return "", util.EncryptionTypeInvalid, err } - encType := util.FetchEncryptionType(volOptions, rbdDefaultEncryptionType) + encType := util.FetchEncryptionType(volOptions, fallbackEncType) return kmsID, encType, nil } diff --git a/internal/rbd/rbd_journal.go b/internal/rbd/rbd_journal.go index d470929940f..f512cc34055 100644 --- a/internal/rbd/rbd_journal.go +++ b/internal/rbd/rbd_journal.go @@ -568,7 +568,7 @@ func RegenerateJournal( rbdVol.Owner = owner - kmsID, _, err = ParseEncryptionOpts(ctx, volumeAttributes) + kmsID, encryptionType, err = ParseEncryptionOpts(ctx, volumeAttributes, util.EncryptionTypeInvalid) if err != nil { return "", err } From 8e49c77a87efce832a5ddbb67936d24c11d01166 Mon Sep 17 00:00:00 2001 From: Marcel Lauhoff Date: Thu, 7 Jul 2022 18:31:38 +0200 Subject: [PATCH 19/35] rbd: Document new encryptionType storage class example Signed-off-by: Marcel Lauhoff --- examples/rbd/storageclass.yaml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/examples/rbd/storageclass.yaml b/examples/rbd/storageclass.yaml index a30114667ac..7270b642b58 100644 --- a/examples/rbd/storageclass.yaml +++ b/examples/rbd/storageclass.yaml @@ -108,6 +108,14 @@ parameters: # A string is expected here, i.e. "true", not true. # encrypted: "true" + # (optional) Select the encryption type when encrypted: "true" above. + # Valid values are: + # "file": Enable file encryption on the mounted filesystem + # "block": Encrypt RBD block device + # When unspecified assume type "block". "file" and "block" are + # mutally exclusive. + # encryptionType: "block" + # (optional) Use external key management system for encryption passphrases by # specifying a unique ID matching KMS ConfigMap. The ID is only used for # correlation to configmap entry. From ca3fabe0ce0da4f0711cf713c73184cc9bfb2ffd Mon Sep 17 00:00:00 2001 From: Marcel Lauhoff Date: Fri, 12 Aug 2022 17:19:34 +0200 Subject: [PATCH 20/35] rbd: Add volume journal encryption support Add fscrypt support to the journal to support operations like snapshotting. 
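The journal reservation now records both the KMS ID and whether the image uses block or file encryption, so that images recreated from that metadata (for example a clone restored from a snapshot) can rebuild the matching encryption helper. A minimal standalone sketch of that idea (hypothetical types, not the ceph-csi journal API):

package main

import "fmt"

// reservation mimics the stored journal entry: which KMS holds the
// passphrase and which encryption variant the image was created with.
type reservation struct {
	kmsID   string
	encType string // "block", "file", or "" for an unencrypted image
}

// rebuildEncryption decides, from the stored reservation alone, how a
// restored image has to be set up.
func rebuildEncryption(r reservation) (string, error) {
	switch r.encType {
	case "block":
		return "set up block (LUKS) encryption via KMS " + r.kmsID, nil
	case "file":
		return "set up fscrypt file encryption via KMS " + r.kmsID, nil
	case "":
		return "no encryption", nil
	default:
		return "", fmt.Errorf("unknown encryption type %q stored for image", r.encType)
	}
}

func main() {
	action, err := rebuildEncryption(reservation{kmsID: "vault-kms", encType: "file"})
	if err != nil {
		panic(err)
	}
	fmt.Println(action)
}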
Signed-off-by: Marcel Lauhoff --- internal/rbd/rbd_journal.go | 50 ++++++++++++++++++++----------------- 1 file changed, 27 insertions(+), 23 deletions(-) diff --git a/internal/rbd/rbd_journal.go b/internal/rbd/rbd_journal.go index f512cc34055..d57203bf523 100644 --- a/internal/rbd/rbd_journal.go +++ b/internal/rbd/rbd_journal.go @@ -87,6 +87,17 @@ func validateRbdVol(rbdVol *rbdVolume) error { return err } +func getEncryptionConfig(rbdVol *rbdVolume) (string, util.EncryptionType) { + switch { + case rbdVol.isBlockEncrypted(): + return rbdVol.blockEncryption.GetID(), util.EncryptionTypeBlock + case rbdVol.isFileEncrypted(): + return rbdVol.fileEncryption.GetID(), util.EncryptionTypeFile + default: + return "", util.EncryptionTypeInvalid + } +} + /* checkSnapCloneExists, and its counterpart checkVolExists, function checks if the passed in rbdSnapshot or rbdVolume exists on the backend. @@ -130,7 +141,7 @@ func checkSnapCloneExists( defer j.Destroy() snapData, err := j.CheckReservation(ctx, rbdSnap.JournalPool, - rbdSnap.RequestName, rbdSnap.NamePrefix, rbdSnap.RbdImageName, "") + rbdSnap.RequestName, rbdSnap.NamePrefix, rbdSnap.RbdImageName, "", util.EncryptionTypeInvalid) if err != nil { return false, err } @@ -245,10 +256,7 @@ func (rv *rbdVolume) Exists(ctx context.Context, parentVol *rbdVolume) (bool, er return false, err } - kmsID := "" - if rv.isBlockEncrypted() { - kmsID = rv.blockEncryption.GetID() - } + kmsID, encryptionType := getEncryptionConfig(rv) j, err := volJournal.Connect(rv.Monitors, rv.RadosNamespace, rv.conn.Creds) if err != nil { @@ -257,7 +265,7 @@ func (rv *rbdVolume) Exists(ctx context.Context, parentVol *rbdVolume) (bool, er defer j.Destroy() imageData, err := j.CheckReservation( - ctx, rv.JournalPool, rv.RequestName, rv.NamePrefix, "", kmsID) + ctx, rv.JournalPool, rv.RequestName, rv.NamePrefix, "", kmsID, encryptionType) if err != nil { return false, err } @@ -386,14 +394,12 @@ func reserveSnap(ctx context.Context, rbdSnap *rbdSnapshot, rbdVol *rbdVolume, c } defer j.Destroy() - kmsID := "" - if rbdVol.isBlockEncrypted() { - kmsID = rbdVol.blockEncryption.GetID() - } + kmsID, encryptionType := getEncryptionConfig(rbdVol) rbdSnap.ReservedID, rbdSnap.RbdSnapName, err = j.ReserveName( ctx, rbdSnap.JournalPool, journalPoolID, rbdSnap.Pool, imagePoolID, - rbdSnap.RequestName, rbdSnap.NamePrefix, rbdVol.RbdImageName, kmsID, rbdSnap.ReservedID, rbdVol.Owner, "") + rbdSnap.RequestName, rbdSnap.NamePrefix, rbdVol.RbdImageName, kmsID, rbdSnap.ReservedID, rbdVol.Owner, + "", encryptionType) if err != nil { return err } @@ -460,10 +466,7 @@ func reserveVol(ctx context.Context, rbdVol *rbdVolume, rbdSnap *rbdSnapshot, cr return err } - kmsID := "" - if rbdVol.isBlockEncrypted() { - kmsID = rbdVol.blockEncryption.GetID() - } + kmsID, encryptionType := getEncryptionConfig(rbdVol) j, err := volJournal.Connect(rbdVol.Monitors, rbdVol.RadosNamespace, cr) if err != nil { @@ -473,7 +476,7 @@ func reserveVol(ctx context.Context, rbdVol *rbdVolume, rbdSnap *rbdSnapshot, cr rbdVol.ReservedID, rbdVol.RbdImageName, err = j.ReserveName( ctx, rbdVol.JournalPool, journalPoolID, rbdVol.Pool, imagePoolID, - rbdVol.RequestName, rbdVol.NamePrefix, "", kmsID, rbdVol.ReservedID, rbdVol.Owner, "") + rbdVol.RequestName, rbdVol.NamePrefix, "", kmsID, rbdVol.ReservedID, rbdVol.Owner, "", encryptionType) if err != nil { return err } @@ -548,11 +551,12 @@ func RegenerateJournal( ) (string, error) { ctx := context.Background() var ( - vi util.CSIIdentifier - rbdVol *rbdVolume - kmsID string - err error - 
ok bool + vi util.CSIIdentifier + rbdVol *rbdVolume + kmsID string + encryptionType util.EncryptionType + err error + ok bool ) rbdVol = &rbdVolume{} @@ -605,7 +609,7 @@ func RegenerateJournal( rbdVol.NamePrefix = volumeAttributes["volumeNamePrefix"] imageData, err := j.CheckReservation( - ctx, rbdVol.JournalPool, rbdVol.RequestName, rbdVol.NamePrefix, "", kmsID) + ctx, rbdVol.JournalPool, rbdVol.RequestName, rbdVol.NamePrefix, "", kmsID, encryptionType) if err != nil { return "", err } @@ -639,7 +643,7 @@ func RegenerateJournal( rbdVol.ReservedID, rbdVol.RbdImageName, err = j.ReserveName( ctx, rbdVol.JournalPool, journalPoolID, rbdVol.Pool, imagePoolID, - rbdVol.RequestName, rbdVol.NamePrefix, "", kmsID, vi.ObjectUUID, rbdVol.Owner, "") + rbdVol.RequestName, rbdVol.NamePrefix, "", kmsID, vi.ObjectUUID, rbdVol.Owner, "", encryptionType) if err != nil { return "", err } From 2bb1f66f77ea5a2cd6220ad2e78d04d16b397162 Mon Sep 17 00:00:00 2001 From: Marcel Lauhoff Date: Thu, 4 Aug 2022 17:52:36 +0200 Subject: [PATCH 21/35] rbd: support file encrypted snapshots Support fscrypt on RBD snapshots Signed-off-by: Marcel Lauhoff --- internal/rbd/rbd_util.go | 19 ++++++++++++++++--- internal/rbd/snapshot.go | 1 + 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/internal/rbd/rbd_util.go b/internal/rbd/rbd_util.go index e33f7252300..edfe4e2b4ee 100644 --- a/internal/rbd/rbd_util.go +++ b/internal/rbd/rbd_util.go @@ -1024,10 +1024,17 @@ func genSnapFromSnapID( rbdSnap, err) } - if imageAttributes.KmsID != "" { + if imageAttributes.KmsID != "" && imageAttributes.EncryptionType == util.EncryptionTypeBlock { err = rbdSnap.configureBlockEncryption(imageAttributes.KmsID, secrets) if err != nil { - return fmt.Errorf("failed to configure encryption for "+ + return fmt.Errorf("failed to configure block encryption for "+ + "%q: %w", rbdSnap, err) + } + } + if imageAttributes.KmsID != "" && imageAttributes.EncryptionType == util.EncryptionTypeFile { + err = rbdSnap.configureFileEncryption(imageAttributes.KmsID, secrets) + if err != nil { + return fmt.Errorf("failed to configure file encryption for "+ "%q: %w", rbdSnap, err) } } @@ -1119,12 +1126,18 @@ func generateVolumeFromVolumeID( rbdVol.ImageID = imageAttributes.ImageID rbdVol.Owner = imageAttributes.Owner - if imageAttributes.KmsID != "" { + if imageAttributes.KmsID != "" && imageAttributes.EncryptionType == util.EncryptionTypeBlock { err = rbdVol.configureBlockEncryption(imageAttributes.KmsID, secrets) if err != nil { return rbdVol, err } } + if imageAttributes.KmsID != "" && imageAttributes.EncryptionType == util.EncryptionTypeFile { + err = rbdVol.configureFileEncryption(imageAttributes.KmsID, secrets) + if err != nil { + return rbdVol, err + } + } // convert the journal pool ID to name, for use in DeleteVolume cases if imageAttributes.JournalPoolID >= 0 { rbdVol.JournalPool, err = util.GetPoolName(rbdVol.Monitors, cr, imageAttributes.JournalPoolID) diff --git a/internal/rbd/snapshot.go b/internal/rbd/snapshot.go index c5f0a0bdf7f..e793d3e0139 100644 --- a/internal/rbd/snapshot.go +++ b/internal/rbd/snapshot.go @@ -112,6 +112,7 @@ func generateVolFromSnap(rbdSnap *rbdSnapshot) *rbdVolume { // snapshot will have the same volumeID which cases the panic in // copyEncryptionConfig function. 
vol.blockEncryption = rbdSnap.blockEncryption + vol.fileEncryption = rbdSnap.fileEncryption return vol } From a6c459d28f2ee6478dcb815d2ec9c6fddb19f109 Mon Sep 17 00:00:00 2001 From: Marcel Lauhoff Date: Tue, 6 Sep 2022 14:43:47 +0200 Subject: [PATCH 22/35] rbd: Add context to fscrypt errors Signed-off-by: Marcel Lauhoff --- internal/rbd/nodeserver.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/internal/rbd/nodeserver.go b/internal/rbd/nodeserver.go index 3a46625f6d0..96d7989a5fc 100644 --- a/internal/rbd/nodeserver.go +++ b/internal/rbd/nodeserver.go @@ -436,7 +436,7 @@ func (ns *NodeServer) stageTransaction( if volOptions.isFileEncrypted() { if err = fscrypt.InitializeNode(ctx); err != nil { - return transaction, err + return transaction, fmt.Errorf("file encryption setup for %s failed: %w", volOptions.VolID, err) } } @@ -458,10 +458,11 @@ func (ns *NodeServer) stageTransaction( transaction.isMounted = true if volOptions.isFileEncrypted() { - log.DebugLog(ctx, "rbd fscrypt: trying to unlock filesystem on %s image %q", stagingTargetPath, volOptions.VolID) + log.DebugLog(ctx, "rbd fscrypt: trying to unlock filesystem on %s image %s", stagingTargetPath, volOptions.VolID) err = fscrypt.Unlock(ctx, volOptions.fileEncryption, stagingTargetPath, volOptions.VolID) if err != nil { - return transaction, err + return transaction, fmt.Errorf("file system encryption unlock in %s image %s failed: %w", + stagingTargetPath, volOptions.VolID, err) } } From 92ba6c280ff09a602fc69b69f24aa7139f465250 Mon Sep 17 00:00:00 2001 From: Marcel Lauhoff Date: Thu, 7 Jul 2022 18:33:46 +0200 Subject: [PATCH 23/35] e2e: Add fscrypt on rbd helper Add validation functions for fscrypt on RBD volumes Signed-off-by: Marcel Lauhoff --- e2e/rbd_helper.go | 90 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 90 insertions(+) diff --git a/e2e/rbd_helper.go b/e2e/rbd_helper.go index ab1a11fc759..d55385f7805 100644 --- a/e2e/rbd_helper.go +++ b/e2e/rbd_helper.go @@ -531,6 +531,55 @@ func validateEncryptedPVCAndAppBinding(pvcPath, appPath string, kms kmsConfig, f return nil } +func validateEncryptedFilesystemAndAppBinding(pvcPath, appPath string, kms kmsConfig, f *framework.Framework) error { + pvc, app, err := createPVCAndAppBinding(pvcPath, appPath, f, deployTimeout) + if err != nil { + return err + } + imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f) + if err != nil { + return err + } + + rbdImageSpec := imageSpec(defaultRBDPool, imageData.imageName) + err = validateEncryptedFilesystem(f, rbdImageSpec, imageData.pvName, app.Name) + if err != nil { + return err + } + + if kms != noKMS && kms.canGetPassphrase() { + // check new passphrase created + _, stdErr := kms.getPassphrase(f, imageData.csiVolumeHandle) + if stdErr != "" { + return fmt.Errorf("failed to read passphrase from vault: %s", stdErr) + } + } + + err = deletePVCAndApp("", f, pvc, app) + if err != nil { + return err + } + + if kms != noKMS && kms.canGetPassphrase() { + // check new passphrase created + stdOut, _ := kms.getPassphrase(f, imageData.csiVolumeHandle) + if stdOut != "" { + return fmt.Errorf("passphrase found in vault while should be deleted: %s", stdOut) + } + } + + if kms != noKMS && kms.canVerifyKeyDestroyed() { + destroyed, msg := kms.verifyKeyDestroyed(f, imageData.csiVolumeHandle) + if !destroyed { + return fmt.Errorf("passphrased was not destroyed: %s", msg) + } else if msg != "" { + e2elog.Logf("passphrase destroyed, but message returned: %s", msg) + } + } + + return nil +} + type 
validateFunc func(f *framework.Framework, pvc *v1.PersistentVolumeClaim, app *v1.Pod) error // noPVCValidation can be used to pass to validatePVCClone when no extra @@ -583,6 +632,47 @@ func validateEncryptedImage(f *framework.Framework, rbdImageSpec, pvName, appNam return nil } +func validateEncryptedFilesystem(f *framework.Framework, rbdImageSpec, pvName, appName string) error { + pod, err := f.ClientSet.CoreV1().Pods(f.UniqueName).Get(context.TODO(), appName, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get pod %q in namespace %q: %w", appName, f.UniqueName, err) + } + volumeMountPath := fmt.Sprintf( + "/var/lib/kubelet/pods/%s/volumes/kubernetes.io~csi/%s/mount", + pod.UID, + pvName) + + selector, err := getDaemonSetLabelSelector(f, cephCSINamespace, rbdDaemonsetName) + if err != nil { + return fmt.Errorf("failed to get labels: %w", err) + } + opt := metav1.ListOptions{ + LabelSelector: selector, + } + cmd := fmt.Sprintf("lsattr -la %s | grep -E '%s/.\\s+Encrypted'", volumeMountPath, volumeMountPath) + _, _, err = execCommandInContainer(f, cmd, cephCSINamespace, "csi-rbdplugin", &opt) + if err != nil { + cmd = fmt.Sprintf("lsattr -lRa %s", volumeMountPath) + stdOut, stdErr, listErr := execCommandInContainer(f, cmd, cephCSINamespace, "csi-rbdplugin", &opt) + if listErr == nil { + return fmt.Errorf("error checking file encrypted attribute of %q. listing filesystem+attrs: %s %s", + volumeMountPath, stdOut, stdErr) + } + return fmt.Errorf("error checking file encrypted attribute: %w", err) + } + + mountType, err := getMountType(selector, volumeMountPath, f) + if err != nil { + return err + } + if mountType == "crypt" { + return fmt.Errorf("mount type of %q is %v suggesting that the block device was encrypted,"+ + " when it must not have been", volumeMountPath, mountType) + } + + return nil +} + func listRBDImages(f *framework.Framework, pool string) ([]string, error) { var imgInfos []string From a5a17fdf5b35ee870fb40ef7ad7a8e5c940030da Mon Sep 17 00:00:00 2001 From: Marcel Lauhoff Date: Wed, 13 Jul 2022 17:59:10 +0200 Subject: [PATCH 24/35] e2e: Add helper to run encryption tests on block and file Add a `By` wrapper to parameterize encryption related test functions and run them on both block and file encryption Signed-off-by: Marcel Lauhoff --- e2e/rbd.go | 9 +++++++++ e2e/rbd_helper.go | 2 ++ 2 files changed, 11 insertions(+) diff --git a/e2e/rbd.go b/e2e/rbd.go index 4383d177461..24559c419b4 100644 --- a/e2e/rbd.go +++ b/e2e/rbd.go @@ -232,6 +232,15 @@ func checkClusternameInMetadata(f *framework.Framework, ns, pool, image string) } } +// ByFileAndBlockEncryption wraps ginkgo's By to run the test body using file and block encryption specific validators. 
+func ByFileAndBlockEncryption( + text string, + callback func(validator encryptionValidateFunc, encryptionType string), +) { + By(text+" (block)", func() { callback(validateEncryptedPVCAndAppBinding, "block") }) + By(text+" (file)", func() { callback(validateEncryptedFilesystemAndAppBinding, "file") }) +} + var _ = Describe("RBD", func() { f := framework.NewDefaultFramework(rbdType) var c clientset.Interface diff --git a/e2e/rbd_helper.go b/e2e/rbd_helper.go index d55385f7805..ec2e0f479ec 100644 --- a/e2e/rbd_helper.go +++ b/e2e/rbd_helper.go @@ -482,6 +482,8 @@ func validateCloneInDifferentPool(f *framework.Framework, snapshotPool, cloneSc, return nil } +type encryptionValidateFunc func(pvcPath, appPath string, kms kmsConfig, f *framework.Framework) error + func validateEncryptedPVCAndAppBinding(pvcPath, appPath string, kms kmsConfig, f *framework.Framework) error { pvc, app, err := createPVCAndAppBinding(pvcPath, appPath, f, deployTimeout) if err != nil { From d27f1377463c8fb6c3528f8fd468dfe4cbeb815a Mon Sep 17 00:00:00 2001 From: Marcel Lauhoff Date: Thu, 4 Aug 2022 18:12:33 +0200 Subject: [PATCH 25/35] e2e: Add PVC validator to ByFileAndBlockEncryption Signed-off-by: Marcel Lauhoff --- e2e/rbd.go | 6 +++--- e2e/rbd_helper.go | 14 ++++++++++++-- e2e/utils.go | 3 ++- 3 files changed, 17 insertions(+), 6 deletions(-) diff --git a/e2e/rbd.go b/e2e/rbd.go index 24559c419b4..032fb7e755d 100644 --- a/e2e/rbd.go +++ b/e2e/rbd.go @@ -235,10 +235,10 @@ func checkClusternameInMetadata(f *framework.Framework, ns, pool, image string) // ByFileAndBlockEncryption wraps ginkgo's By to run the test body using file and block encryption specific validators. func ByFileAndBlockEncryption( text string, - callback func(validator encryptionValidateFunc, encryptionType string), + callback func(validator encryptionValidateFunc, pvcValidator validateFunc, encryptionType string), ) { - By(text+" (block)", func() { callback(validateEncryptedPVCAndAppBinding, "block") }) - By(text+" (file)", func() { callback(validateEncryptedFilesystemAndAppBinding, "file") }) + By(text+" (block)", func() { callback(validateEncryptedPVCAndAppBinding, isBlockEncryptedPVC, "block") }) + By(text+" (file)", func() { callback(validateEncryptedFilesystemAndAppBinding, isFileEncryptedPVC, "file") }) } var _ = Describe("RBD", func() { diff --git a/e2e/rbd_helper.go b/e2e/rbd_helper.go index ec2e0f479ec..c189737e73d 100644 --- a/e2e/rbd_helper.go +++ b/e2e/rbd_helper.go @@ -588,14 +588,24 @@ type validateFunc func(f *framework.Framework, pvc *v1.PersistentVolumeClaim, ap // validation of the PVC is needed. 
var noPVCValidation validateFunc -func isEncryptedPVC(f *framework.Framework, pvc *v1.PersistentVolumeClaim, app *v1.Pod) error { +type imageValidateFunc func(f *framework.Framework, rbdImageSpec, pvName, appName string) error + +func isEncryptedPVC(f *framework.Framework, pvc *v1.PersistentVolumeClaim, app *v1.Pod, validateFunc imageValidateFunc) error { imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f) if err != nil { return err } rbdImageSpec := imageSpec(defaultRBDPool, imageData.imageName) - return validateEncryptedImage(f, rbdImageSpec, imageData.pvName, app.Name) + return validateFunc(f, rbdImageSpec, imageData.pvName, app.Name) +} + +func isBlockEncryptedPVC(f *framework.Framework, pvc *v1.PersistentVolumeClaim, app *v1.Pod) error { + return isEncryptedPVC(f, pvc, app, validateEncryptedImage) +} + +func isFileEncryptedPVC(f *framework.Framework, pvc *v1.PersistentVolumeClaim, app *v1.Pod) error { + return isEncryptedPVC(f, pvc, app, validateEncryptedFilesystem) } // validateEncryptedImage verifies that the RBD image is encrypted. The diff --git a/e2e/utils.go b/e2e/utils.go index 58ed04efade..f844bfae684 100644 --- a/e2e/utils.go +++ b/e2e/utils.go @@ -1025,6 +1025,7 @@ func validatePVCSnapshot( pvcPath, appPath, snapshotPath, pvcClonePath, appClonePath string, kms, restoreKMS kmsConfig, restoreSCName, dataPool string, f *framework.Framework, + isEncryptedPVC validateFunc, ) { var wg sync.WaitGroup wgErrs := make([]error, totalCount) @@ -1448,7 +1449,7 @@ func validateController( } if scParams["encrypted"] == strconv.FormatBool(true) { // check encryption - err = isEncryptedPVC(f, resizePvc, app) + err = isBlockEncryptedPVC(f, resizePvc, app) if err != nil { return err } From 656af96fb4d670628cb089eac415dbef8ca46c27 Mon Sep 17 00:00:00 2001 From: Marcel Lauhoff Date: Wed, 13 Jul 2022 18:25:15 +0200 Subject: [PATCH 26/35] e2e: Run encryption related tests on file and block type Replace `By` with `ByFileAndBlockEncryption` in all encryption related tests to parameterize them to file and block encryption. 
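For reference, a minimal sketch of the test shape this rewrite produces (illustrative only, not part of this change; it reuses the e2e helpers introduced earlier in this series, and at this point in the series the callback still receives the encryption type as a plain string):

// exampleEncryptedPVCTest is a hypothetical helper, shown only to illustrate
// the rewrite pattern; the real tests live inside the ginkgo Describe blocks.
func exampleEncryptedPVCTest(f *framework.Framework) {
	ByFileAndBlockEncryption("create a PVC and bind it to an app with encryption", func(
		validator encryptionValidateFunc, _ validateFunc, encType string,
	) {
		// Each leg gets the matching storage-class parameter and validator:
		// the block run uses validateEncryptedPVCAndAppBinding, the file run
		// uses validateEncryptedFilesystemAndAppBinding.
		scOpts := map[string]string{
			"encrypted":      "true",
			"encryptionType": encType,
		}
		err := createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy)
		if err != nil {
			e2elog.Failf("failed to create storageclass: %v", err)
		}
		if err = validator(pvcPath, appPath, noKMS, f); err != nil {
			e2elog.Failf("failed to validate encrypted pvc: %v", err)
		}
	})
}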
Signed-off-by: Marcel Lauhoff --- e2e/rbd.go | 160 +++++++++++++++++++++++++++++++++-------------------- 1 file changed, 100 insertions(+), 60 deletions(-) diff --git a/e2e/rbd.go b/e2e/rbd.go index 032fb7e755d..eb7fdf3aabc 100644 --- a/e2e/rbd.go +++ b/e2e/rbd.go @@ -998,7 +998,8 @@ var _ = Describe("RBD", func() { noKMS, noKMS, defaultSCName, erasureCodedPool, - f) + f, + noPVCValidation) }) By("create an erasure coded PVC and validate PVC-PVC clone", func() { @@ -1880,13 +1881,13 @@ var _ = Describe("RBD", func() { } }) - By("create a PVC and bind it to an app using rbd-nbd mounter with encryption", func() { + ByFileAndBlockEncryption("create a PVC and bind it to an app using rbd-nbd mounter with encryption", func( + validator encryptionValidateFunc, _ validateFunc, encType string) { if !testNBD { e2elog.Logf("skipping NBD test") return } - err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass: %v", err) @@ -1902,12 +1903,13 @@ var _ = Describe("RBD", func() { "mapOptions": nbdMapOptions, "cephLogStrategy": e2eDefaultCephLogStrategy, "encrypted": "true", + "encryptionType": encType, }, deletePolicy) if err != nil { e2elog.Failf("failed to create storageclass: %v", err) } - err = validateEncryptedPVCAndAppBinding(pvcPath, appPath, noKMS, f) + err = validator(pvcPath, appPath, noKMS, f) if err != nil { e2elog.Failf("failed to validate encrypted pvc: %v", err) } @@ -1924,7 +1926,9 @@ var _ = Describe("RBD", func() { } }) - By("create a PVC and bind it to an app with encrypted RBD volume", func() { + ByFileAndBlockEncryption("create a PVC and bind it to an app with encrypted RBD volume", func( + validator encryptionValidateFunc, _ validateFunc, encType string, + ) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass: %v", err) @@ -1934,12 +1938,12 @@ var _ = Describe("RBD", func() { f, defaultSCName, nil, - map[string]string{"encrypted": "true"}, + map[string]string{"encrypted": "true", "encryptionType": encType}, deletePolicy) if err != nil { e2elog.Failf("failed to create storageclass: %v", err) } - err = validateEncryptedPVCAndAppBinding(pvcPath, appPath, noKMS, f) + err = validator(pvcPath, appPath, noKMS, f) if err != nil { e2elog.Failf("failed to validate encrypted pvc: %v", err) } @@ -1956,7 +1960,8 @@ var _ = Describe("RBD", func() { } }) - By("Resize Encrypted Block PVC and check Device size", func() { + ByFileAndBlockEncryption("Resize Encrypted Block PVC and check Device size", func( + validator encryptionValidateFunc, _ validateFunc, encType string) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass: %v", err) @@ -1966,7 +1971,7 @@ var _ = Describe("RBD", func() { f, defaultSCName, nil, - map[string]string{"encrypted": "true"}, + map[string]string{"encrypted": "true", "encryptionType": encType}, deletePolicy) if err != nil { e2elog.Failf("failed to create storageclass: %v", err) @@ -1981,15 +1986,16 @@ var _ = Describe("RBD", func() { validateRBDImageCount(f, 0, defaultRBDPool) validateOmapCount(f, 0, rbdType, defaultRBDPool, volumesType) - // Block PVC resize - err = resizePVCAndValidateSize(rawPvcPath, rawAppPath, f) - if err != nil { - e2elog.Failf("failed to resize block PVC: %v", err) + if encType != "file" { + // Block PVC resize + err = resizePVCAndValidateSize(rawPvcPath, rawAppPath, f) + if err != nil { + e2elog.Failf("failed to resize block PVC: %v", err) + } + // 
validate created backend rbd images + validateRBDImageCount(f, 0, defaultRBDPool) + validateOmapCount(f, 0, rbdType, defaultRBDPool, volumesType) } - // validate created backend rbd images - validateRBDImageCount(f, 0, defaultRBDPool) - validateOmapCount(f, 0, rbdType, defaultRBDPool, volumesType) - err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass: %v", err) @@ -2000,7 +2006,8 @@ var _ = Describe("RBD", func() { } }) - By("create a PVC and bind it to an app with encrypted RBD volume with VaultKMS", func() { + ByFileAndBlockEncryption("create a PVC and bind it to an app with encrypted RBD volume with VaultKMS", func( + validator encryptionValidateFunc, _ validateFunc, encType string) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass: %v", err) @@ -2008,12 +2015,13 @@ var _ = Describe("RBD", func() { scOpts := map[string]string{ "encrypted": "true", "encryptionKMSID": "vault-test", + "encryptionType": encType, } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { e2elog.Failf("failed to create storageclass: %v", err) } - err = validateEncryptedPVCAndAppBinding(pvcPath, appPath, vaultKMS, f) + err = validator(pvcPath, appPath, vaultKMS, f) if err != nil { e2elog.Failf("failed to validate encrypted pvc: %v", err) } @@ -2030,7 +2038,8 @@ var _ = Describe("RBD", func() { } }) - By("create a PVC and bind it to an app with encrypted RBD volume with VaultTokensKMS", func() { + ByFileAndBlockEncryption("create a PVC and bind it to an app with encrypted RBD volume with VaultTokensKMS", func( + validator encryptionValidateFunc, _ validateFunc, encType string) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass: %v", err) @@ -2038,6 +2047,7 @@ var _ = Describe("RBD", func() { scOpts := map[string]string{ "encrypted": "true", "encryptionKMSID": "vault-tokens-test", + "encryptionType": encType, } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { @@ -2057,7 +2067,7 @@ var _ = Describe("RBD", func() { e2elog.Failf("failed to create Secret with tenant token: %v", err) } - err = validateEncryptedPVCAndAppBinding(pvcPath, appPath, vaultTokensKMS, f) + err = validator(pvcPath, appPath, vaultTokensKMS, f) if err != nil { e2elog.Failf("failed to validate encrypted pvc: %v", err) } @@ -2081,7 +2091,8 @@ var _ = Describe("RBD", func() { } }) - By("create a PVC and bind it to an app with encrypted RBD volume with VaultTenantSA KMS", func() { + ByFileAndBlockEncryption("create a PVC and bind it to an app with encrypted RBD volume with VaultTenantSA KMS", func( + validator encryptionValidateFunc, _ validateFunc, encType string) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass: %v", err) @@ -2089,6 +2100,7 @@ var _ = Describe("RBD", func() { scOpts := map[string]string{ "encrypted": "true", "encryptionKMSID": "vault-tenant-sa-test", + "encryptionType": encType, } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { @@ -2101,7 +2113,7 @@ var _ = Describe("RBD", func() { } defer deleteTenantServiceAccount(f.UniqueName) - err = validateEncryptedPVCAndAppBinding(pvcPath, appPath, vaultTenantSAKMS, f) + err = validator(pvcPath, appPath, vaultTenantSAKMS, f) if err != nil { e2elog.Failf("failed 
to validate encrypted pvc: %v", err) } @@ -2118,7 +2130,8 @@ var _ = Describe("RBD", func() { } }) - By("create a PVC and bind it to an app with encrypted RBD volume with SecretsMetadataKMS", func() { + ByFileAndBlockEncryption("create a PVC and bind it to an app with encrypted RBD volume with SecretsMetadataKMS", func( + validator encryptionValidateFunc, _ validateFunc, encType string) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass: %v", err) @@ -2126,12 +2139,13 @@ var _ = Describe("RBD", func() { scOpts := map[string]string{ "encrypted": "true", "encryptionKMSID": "secrets-metadata-test", + "encryptionType": encType, } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { e2elog.Failf("failed to create storageclass: %v", err) } - err = validateEncryptedPVCAndAppBinding(pvcPath, appPath, noKMS, f) + err = validator(pvcPath, appPath, noKMS, f) if err != nil { e2elog.Failf("failed to validate encrypted pvc: %v", err) } @@ -2148,7 +2162,8 @@ var _ = Describe("RBD", func() { } }) - By("test RBD volume encryption with user secrets based SecretsMetadataKMS", func() { + ByFileAndBlockEncryption("test RBD volume encryption with user secrets based SecretsMetadataKMS", func( + validator encryptionValidateFunc, _ validateFunc, encType string) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass: %v", err) @@ -2156,6 +2171,7 @@ var _ = Describe("RBD", func() { scOpts := map[string]string{ "encrypted": "true", "encryptionKMSID": "user-ns-secrets-metadata-test", + "encryptionType": encType, } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { @@ -2171,7 +2187,7 @@ var _ = Describe("RBD", func() { e2elog.Failf("failed to create user Secret: %v", err) } - err = validateEncryptedPVCAndAppBinding(pvcPath, appPath, noKMS, f) + err = validator(pvcPath, appPath, noKMS, f) if err != nil { e2elog.Failf("failed to validate encrypted pvc: %v", err) } @@ -2199,9 +2215,9 @@ var _ = Describe("RBD", func() { } }) - By( + ByFileAndBlockEncryption( "test RBD volume encryption with user secrets based SecretsMetadataKMS with tenant namespace", - func() { + func(validator encryptionValidateFunc, isEncryptedPVC validateFunc, encType string) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass: %v", err) @@ -2209,6 +2225,7 @@ var _ = Describe("RBD", func() { scOpts := map[string]string{ "encrypted": "true", "encryptionKMSID": "user-secrets-metadata-test", + "encryptionType": encType, } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { @@ -2224,7 +2241,7 @@ var _ = Describe("RBD", func() { e2elog.Failf("failed to create user Secret: %v", err) } - err = validateEncryptedPVCAndAppBinding(pvcPath, appPath, noKMS, f) + err = validator(pvcPath, appPath, noKMS, f) if err != nil { e2elog.Failf("failed to validate encrypted pvc: %v", err) } @@ -2307,7 +2324,8 @@ var _ = Describe("RBD", func() { noKMS, noKMS, defaultSCName, noDataPool, - f) + f, + noPVCValidation) }) By("create a PVC-PVC clone and bind it to an app", func() { @@ -2324,7 +2342,8 @@ var _ = Describe("RBD", func() { f) }) - By("create an encrypted PVC snapshot and restore it for an app with VaultKMS", func() { + ByFileAndBlockEncryption("create an encrypted PVC snapshot and restore it for an app with VaultKMS", 
func( + validator encryptionValidateFunc, isEncryptedPVC validateFunc, encType string) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass: %v", err) @@ -2332,6 +2351,7 @@ var _ = Describe("RBD", func() { scOpts := map[string]string{ "encrypted": "true", "encryptionKMSID": "vault-test", + "encryptionType": encType, } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { @@ -2342,7 +2362,7 @@ var _ = Describe("RBD", func() { pvcPath, appPath, snapshotPath, pvcClonePath, appClonePath, vaultKMS, vaultKMS, defaultSCName, noDataPool, - f) + f, isEncryptedPVC) err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { @@ -2354,7 +2374,8 @@ var _ = Describe("RBD", func() { } }) - By("Validate PVC restore from vaultKMS to vaultTenantSAKMS", func() { + ByFileAndBlockEncryption("Validate PVC restore from vaultKMS to vaultTenantSAKMS", func( + validator encryptionValidateFunc, isEncryptedPVC validateFunc, encType string) { restoreSCName := "restore-sc" err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { @@ -2363,6 +2384,7 @@ var _ = Describe("RBD", func() { scOpts := map[string]string{ "encrypted": "true", "encryptionKMSID": "vault-test", + "encryptionType": encType, } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { @@ -2372,6 +2394,7 @@ var _ = Describe("RBD", func() { scOpts = map[string]string{ "encrypted": "true", "encryptionKMSID": "vault-tenant-sa-test", + "encryptionType": encType, } err = createRBDStorageClass(f.ClientSet, f, restoreSCName, nil, scOpts, deletePolicy) if err != nil { @@ -2387,7 +2410,8 @@ var _ = Describe("RBD", func() { validatePVCSnapshot(1, pvcPath, appPath, snapshotPath, pvcClonePath, appClonePath, vaultKMS, vaultTenantSAKMS, - restoreSCName, noDataPool, f) + restoreSCName, noDataPool, f, + isEncryptedPVC) err = retryKubectlArgs(cephCSINamespace, kubectlDelete, deployTimeout, "storageclass", restoreSCName) if err != nil { @@ -2409,7 +2433,8 @@ var _ = Describe("RBD", func() { } }) - By("Validate PVC-PVC clone with different SC from vaultKMS to vaultTenantSAKMS", func() { + ByFileAndBlockEncryption("Validate PVC-PVC clone with different SC from vaultKMS to vaultTenantSAKMS", func( + validator encryptionValidateFunc, isValidPVC validateFunc, encType string) { restoreSCName := "restore-sc" err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { @@ -2418,6 +2443,7 @@ var _ = Describe("RBD", func() { scOpts := map[string]string{ "encrypted": "true", "encryptionKMSID": "vault-test", + "encryptionType": encType, } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { @@ -2427,6 +2453,7 @@ var _ = Describe("RBD", func() { scOpts = map[string]string{ "encrypted": "true", "encryptionKMSID": "vault-tenant-sa-test", + "encryptionType": encType, } err = createRBDStorageClass(f.ClientSet, f, restoreSCName, nil, scOpts, deletePolicy) if err != nil { @@ -2447,7 +2474,7 @@ var _ = Describe("RBD", func() { restoreSCName, noDataPool, secretsMetadataKMS, - isEncryptedPVC, + isValidPVC, f) err = retryKubectlArgs(cephCSINamespace, kubectlDelete, deployTimeout, "storageclass", restoreSCName) @@ -2469,7 +2496,8 @@ var _ = Describe("RBD", func() { } }) - By("create an encrypted PVC-PVC clone and bind it to an app", func() { + ByFileAndBlockEncryption("create an encrypted PVC-PVC clone and bind it to an app", func( + 
validator encryptionValidateFunc, isValidPVC validateFunc, encType string) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass: %v", err) @@ -2477,6 +2505,7 @@ var _ = Describe("RBD", func() { scOpts := map[string]string{ "encrypted": "true", "encryptionKMSID": "secrets-metadata-test", + "encryptionType": encType, } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { @@ -2491,7 +2520,7 @@ var _ = Describe("RBD", func() { defaultSCName, noDataPool, secretsMetadataKMS, - isEncryptedPVC, + isValidPVC, f) err = deleteResource(rbdExamplePath + "storageclass.yaml") @@ -2504,7 +2533,8 @@ var _ = Describe("RBD", func() { } }) - By("create an encrypted PVC-PVC clone and bind it to an app with VaultKMS", func() { + ByFileAndBlockEncryption("create an encrypted PVC-PVC clone and bind it to an app with VaultKMS", func( + validator encryptionValidateFunc, isValidPVC validateFunc, encType string) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass: %v", err) @@ -2512,6 +2542,7 @@ var _ = Describe("RBD", func() { scOpts := map[string]string{ "encrypted": "true", "encryptionKMSID": "vault-test", + "encryptionType": encType, } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { @@ -2526,7 +2557,7 @@ var _ = Describe("RBD", func() { defaultSCName, noDataPool, vaultKMS, - isEncryptedPVC, + isValidPVC, f) err = deleteResource(rbdExamplePath + "storageclass.yaml") @@ -4001,10 +4032,13 @@ var _ = Describe("RBD", func() { } }) - By("restore snapshot to bigger size encrypted PVC with VaultKMS", func() { + ByFileAndBlockEncryption("restore snapshot to bigger size encrypted PVC with VaultKMS", func( + _ encryptionValidateFunc, _ validateFunc, encType string, + ) { scOpts := map[string]string{ "encrypted": "true", "encryptionKMSID": "vault-test", + "encryptionType": encType, } err := createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { @@ -4036,15 +4070,17 @@ var _ = Describe("RBD", func() { if err != nil { e2elog.Failf("failed to validate restore bigger size clone: %v", err) } - // validate block mode PVC - err = validateBiggerPVCFromSnapshot(f, - rawPvcPath, - rawAppPath, - snapshotPath, - pvcBlockRestorePath, - appBlockRestorePath) - if err != nil { - e2elog.Failf("failed to validate restore bigger size clone: %v", err) + if encType != "file" { + // validate block mode PVC + err = validateBiggerPVCFromSnapshot(f, + rawPvcPath, + rawAppPath, + snapshotPath, + pvcBlockRestorePath, + appBlockRestorePath) + if err != nil { + e2elog.Failf("failed to validate restore bigger size clone: %v", err) + } } }) @@ -4059,9 +4095,11 @@ var _ = Describe("RBD", func() { }) By("clone PVC to a bigger size PVC", func() { - By("clone PVC to bigger size encrypted PVC with VaultKMS", func() { + ByFileAndBlockEncryption("clone PVC to bigger size encrypted PVC with VaultKMS", func( + validator encryptionValidateFunc, _ validateFunc, encType string) { scOpts := map[string]string{ "encrypted": "true", + "encryptionType": encType, "encryptionKMSID": "vault-test", } err := createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) @@ -4084,14 +4122,16 @@ var _ = Describe("RBD", func() { if err != nil { e2elog.Failf("failed to validate bigger size clone: %v", err) } - // validate block mode PVC - err = validateBiggerCloneFromPVC(f, - 
rawPvcPath, - rawAppPath, - pvcBlockSmartClonePath, - appBlockSmartClonePath) - if err != nil { - e2elog.Failf("failed to validate bigger size clone: %v", err) + if encType != "file" { + // validate block mode PVC + err = validateBiggerCloneFromPVC(f, + rawPvcPath, + rawAppPath, + pvcBlockSmartClonePath, + appBlockSmartClonePath) + if err != nil { + e2elog.Failf("failed to validate bigger size clone: %v", err) + } } }) From d94e75ef04725e23aaba6e51d24eaebbdd2cb808 Mon Sep 17 00:00:00 2001 From: Marcel Lauhoff Date: Wed, 17 Aug 2022 17:22:50 +0200 Subject: [PATCH 27/35] e2e: Add encrypted PVC with default settings test Add test that enables encryption with default type. Check that we set up block encryption. Signed-off-by: Marcel Lauhoff --- e2e/rbd.go | 43 ++++++++++++++++++++++++++++++++++++++----- 1 file changed, 38 insertions(+), 5 deletions(-) diff --git a/e2e/rbd.go b/e2e/rbd.go index eb7fdf3aabc..00fe5886417 100644 --- a/e2e/rbd.go +++ b/e2e/rbd.go @@ -4317,14 +4317,38 @@ var _ = Describe("RBD", func() { validateOmapCount(f, 0, rbdType, defaultRBDPool, volumesType) }) - // Make sure this should be last testcase in this file, because - // it deletes pool - By("Create a PVC and delete PVC when backend pool deleted", func() { - err := pvcDeleteWhenPoolNotFound(pvcPath, false, f) + By("create a PVC and bind it to an app with encrypted RBD volume (default type setting)", func() { + err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete PVC when pool not found: %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) + } + err = createRBDStorageClass( + f.ClientSet, + f, + defaultSCName, + nil, + map[string]string{"encrypted": "true"}, + deletePolicy) + if err != nil { + e2elog.Failf("failed to create storageclass: %v", err) + } + err = validateEncryptedPVCAndAppBinding(pvcPath, appPath, noKMS, f) + if err != nil { + e2elog.Failf("failed to validate encrypted pvc: %v", err) + } + // validate created backend rbd images + validateRBDImageCount(f, 0, defaultRBDPool) + validateOmapCount(f, 0, rbdType, defaultRBDPool, volumesType) + err = deleteResource(rbdExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete storageclass: %v", err) + } + err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy) + if err != nil { + e2elog.Failf("failed to create storageclass: %v", err) } }) + // delete RBD provisioner secret err := deleteCephUser(f, keyringRBDProvisionerUsername) if err != nil { @@ -4335,6 +4359,15 @@ var _ = Describe("RBD", func() { if err != nil { e2elog.Failf("failed to delete user %s: %v", keyringRBDNodePluginUsername, err) } + + // Make sure this should be last testcase in this file, because + // it deletes pool + By("Create a PVC and delete PVC when backend pool deleted", func() { + err := pvcDeleteWhenPoolNotFound(pvcPath, false, f) + if err != nil { + e2elog.Failf("failed to delete PVC when pool not found: %v", err) + } + }) }) }) }) From b179a139769520fbdf7eeb4e10591db74aaffa3a Mon Sep 17 00:00:00 2001 From: Marcel Lauhoff Date: Wed, 24 Aug 2022 10:35:49 +0200 Subject: [PATCH 28/35] e2e: Apply formatting to rbd suite and helper Apply formatting for previous changes separately to make the commit diffs easier to read. 
Signed-off-by: Marcel Lauhoff --- e2e/rbd.go | 98 ++++++++++++++++++++++++++--------------------- e2e/rbd_helper.go | 8 +++- 2 files changed, 62 insertions(+), 44 deletions(-) diff --git a/e2e/rbd.go b/e2e/rbd.go index 00fe5886417..b187783a8f8 100644 --- a/e2e/rbd.go +++ b/e2e/rbd.go @@ -1882,7 +1882,8 @@ var _ = Describe("RBD", func() { }) ByFileAndBlockEncryption("create a PVC and bind it to an app using rbd-nbd mounter with encryption", func( - validator encryptionValidateFunc, _ validateFunc, encType string) { + validator encryptionValidateFunc, _ validateFunc, encType string, + ) { if !testNBD { e2elog.Logf("skipping NBD test") @@ -1961,7 +1962,8 @@ var _ = Describe("RBD", func() { }) ByFileAndBlockEncryption("Resize Encrypted Block PVC and check Device size", func( - validator encryptionValidateFunc, _ validateFunc, encType string) { + validator encryptionValidateFunc, _ validateFunc, encType string, + ) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass: %v", err) @@ -2007,7 +2009,8 @@ var _ = Describe("RBD", func() { }) ByFileAndBlockEncryption("create a PVC and bind it to an app with encrypted RBD volume with VaultKMS", func( - validator encryptionValidateFunc, _ validateFunc, encType string) { + validator encryptionValidateFunc, _ validateFunc, encType string, + ) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass: %v", err) @@ -2039,7 +2042,8 @@ var _ = Describe("RBD", func() { }) ByFileAndBlockEncryption("create a PVC and bind it to an app with encrypted RBD volume with VaultTokensKMS", func( - validator encryptionValidateFunc, _ validateFunc, encType string) { + validator encryptionValidateFunc, _ validateFunc, encType string, + ) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass: %v", err) @@ -2092,7 +2096,8 @@ var _ = Describe("RBD", func() { }) ByFileAndBlockEncryption("create a PVC and bind it to an app with encrypted RBD volume with VaultTenantSA KMS", func( - validator encryptionValidateFunc, _ validateFunc, encType string) { + validator encryptionValidateFunc, _ validateFunc, encType string, + ) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass: %v", err) @@ -2130,40 +2135,41 @@ var _ = Describe("RBD", func() { } }) - ByFileAndBlockEncryption("create a PVC and bind it to an app with encrypted RBD volume with SecretsMetadataKMS", func( - validator encryptionValidateFunc, _ validateFunc, encType string) { - err := deleteResource(rbdExamplePath + "storageclass.yaml") - if err != nil { - e2elog.Failf("failed to delete storageclass: %v", err) - } - scOpts := map[string]string{ - "encrypted": "true", - "encryptionKMSID": "secrets-metadata-test", - "encryptionType": encType, - } - err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) - if err != nil { - e2elog.Failf("failed to create storageclass: %v", err) - } - err = validator(pvcPath, appPath, noKMS, f) - if err != nil { - e2elog.Failf("failed to validate encrypted pvc: %v", err) - } - // validate created backend rbd images - validateRBDImageCount(f, 0, defaultRBDPool) - validateOmapCount(f, 0, rbdType, defaultRBDPool, volumesType) - err = deleteResource(rbdExamplePath + "storageclass.yaml") - if err != nil { - e2elog.Failf("failed to delete storageclass: %v", err) - } - err = createRBDStorageClass(f.ClientSet, 
f, defaultSCName, nil, nil, deletePolicy) - if err != nil { - e2elog.Failf("failed to create storageclass: %v", err) - } - }) + ByFileAndBlockEncryption("create a PVC and bind it to an app with encrypted RBD volume with SecretsMetadataKMS", + func(validator encryptionValidateFunc, _ validateFunc, encType string) { + err := deleteResource(rbdExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete storageclass: %v", err) + } + scOpts := map[string]string{ + "encrypted": "true", + "encryptionKMSID": "secrets-metadata-test", + "encryptionType": encType, + } + err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) + if err != nil { + e2elog.Failf("failed to create storageclass: %v", err) + } + err = validator(pvcPath, appPath, noKMS, f) + if err != nil { + e2elog.Failf("failed to validate encrypted pvc: %v", err) + } + // validate created backend rbd images + validateRBDImageCount(f, 0, defaultRBDPool) + validateOmapCount(f, 0, rbdType, defaultRBDPool, volumesType) + err = deleteResource(rbdExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete storageclass: %v", err) + } + err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy) + if err != nil { + e2elog.Failf("failed to create storageclass: %v", err) + } + }) ByFileAndBlockEncryption("test RBD volume encryption with user secrets based SecretsMetadataKMS", func( - validator encryptionValidateFunc, _ validateFunc, encType string) { + validator encryptionValidateFunc, _ validateFunc, encType string, + ) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass: %v", err) @@ -2343,7 +2349,8 @@ var _ = Describe("RBD", func() { }) ByFileAndBlockEncryption("create an encrypted PVC snapshot and restore it for an app with VaultKMS", func( - validator encryptionValidateFunc, isEncryptedPVC validateFunc, encType string) { + validator encryptionValidateFunc, isEncryptedPVC validateFunc, encType string, + ) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass: %v", err) @@ -2375,7 +2382,8 @@ var _ = Describe("RBD", func() { }) ByFileAndBlockEncryption("Validate PVC restore from vaultKMS to vaultTenantSAKMS", func( - validator encryptionValidateFunc, isEncryptedPVC validateFunc, encType string) { + validator encryptionValidateFunc, isEncryptedPVC validateFunc, encType string, + ) { restoreSCName := "restore-sc" err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { @@ -2434,7 +2442,8 @@ var _ = Describe("RBD", func() { }) ByFileAndBlockEncryption("Validate PVC-PVC clone with different SC from vaultKMS to vaultTenantSAKMS", func( - validator encryptionValidateFunc, isValidPVC validateFunc, encType string) { + validator encryptionValidateFunc, isValidPVC validateFunc, encType string, + ) { restoreSCName := "restore-sc" err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { @@ -2497,7 +2506,8 @@ var _ = Describe("RBD", func() { }) ByFileAndBlockEncryption("create an encrypted PVC-PVC clone and bind it to an app", func( - validator encryptionValidateFunc, isValidPVC validateFunc, encType string) { + validator encryptionValidateFunc, isValidPVC validateFunc, encType string, + ) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass: %v", err) @@ -2534,7 +2544,8 @@ var _ = Describe("RBD", func() { }) 
ByFileAndBlockEncryption("create an encrypted PVC-PVC clone and bind it to an app with VaultKMS", func( - validator encryptionValidateFunc, isValidPVC validateFunc, encType string) { + validator encryptionValidateFunc, isValidPVC validateFunc, encType string, + ) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass: %v", err) @@ -4096,7 +4107,8 @@ var _ = Describe("RBD", func() { By("clone PVC to a bigger size PVC", func() { ByFileAndBlockEncryption("clone PVC to bigger size encrypted PVC with VaultKMS", func( - validator encryptionValidateFunc, _ validateFunc, encType string) { + validator encryptionValidateFunc, _ validateFunc, encType string, + ) { scOpts := map[string]string{ "encrypted": "true", "encryptionType": encType, diff --git a/e2e/rbd_helper.go b/e2e/rbd_helper.go index c189737e73d..498a2f05229 100644 --- a/e2e/rbd_helper.go +++ b/e2e/rbd_helper.go @@ -590,7 +590,12 @@ var noPVCValidation validateFunc type imageValidateFunc func(f *framework.Framework, rbdImageSpec, pvName, appName string) error -func isEncryptedPVC(f *framework.Framework, pvc *v1.PersistentVolumeClaim, app *v1.Pod, validateFunc imageValidateFunc) error { +func isEncryptedPVC( + f *framework.Framework, + pvc *v1.PersistentVolumeClaim, + app *v1.Pod, + validateFunc imageValidateFunc, +) error { imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f) if err != nil { return err @@ -670,6 +675,7 @@ func validateEncryptedFilesystem(f *framework.Framework, rbdImageSpec, pvName, a return fmt.Errorf("error checking file encrypted attribute of %q. listing filesystem+attrs: %s %s", volumeMountPath, stdOut, stdErr) } + return fmt.Errorf("error checking file encrypted attribute: %w", err) } From e4c561b7e1a7d022b28f07eeb13316a8a449d18f Mon Sep 17 00:00:00 2001 From: Marcel Lauhoff Date: Wed, 31 Aug 2022 08:32:05 +0200 Subject: [PATCH 29/35] e2e: Use utilEncryptionType instead of string in rbd suite Signed-off-by: Marcel Lauhoff --- e2e/rbd.go | 84 ++++++++++++++++++++++++++++-------------------------- 1 file changed, 44 insertions(+), 40 deletions(-) diff --git a/e2e/rbd.go b/e2e/rbd.go index b187783a8f8..fc2feabdb01 100644 --- a/e2e/rbd.go +++ b/e2e/rbd.go @@ -235,10 +235,14 @@ func checkClusternameInMetadata(f *framework.Framework, ns, pool, image string) // ByFileAndBlockEncryption wraps ginkgo's By to run the test body using file and block encryption specific validators. 
func ByFileAndBlockEncryption( text string, - callback func(validator encryptionValidateFunc, pvcValidator validateFunc, encryptionType string), + callback func(validator encryptionValidateFunc, pvcValidator validateFunc, encryptionType util.EncryptionType), ) { - By(text+" (block)", func() { callback(validateEncryptedPVCAndAppBinding, isBlockEncryptedPVC, "block") }) - By(text+" (file)", func() { callback(validateEncryptedFilesystemAndAppBinding, isFileEncryptedPVC, "file") }) + By(text+" (block)", func() { + callback(validateEncryptedPVCAndAppBinding, isBlockEncryptedPVC, util.EncryptionTypeBlock) + }) + By(text+" (file)", func() { + callback(validateEncryptedFilesystemAndAppBinding, isFileEncryptedPVC, util.EncryptionTypeFile) + }) } var _ = Describe("RBD", func() { @@ -1882,7 +1886,7 @@ var _ = Describe("RBD", func() { }) ByFileAndBlockEncryption("create a PVC and bind it to an app using rbd-nbd mounter with encryption", func( - validator encryptionValidateFunc, _ validateFunc, encType string, + validator encryptionValidateFunc, _ validateFunc, encType util.EncryptionType, ) { if !testNBD { e2elog.Logf("skipping NBD test") @@ -1904,7 +1908,7 @@ var _ = Describe("RBD", func() { "mapOptions": nbdMapOptions, "cephLogStrategy": e2eDefaultCephLogStrategy, "encrypted": "true", - "encryptionType": encType, + "encryptionType": util.EncryptionTypeString(encType), }, deletePolicy) if err != nil { @@ -1928,7 +1932,7 @@ var _ = Describe("RBD", func() { }) ByFileAndBlockEncryption("create a PVC and bind it to an app with encrypted RBD volume", func( - validator encryptionValidateFunc, _ validateFunc, encType string, + validator encryptionValidateFunc, _ validateFunc, encType util.EncryptionType, ) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { @@ -1939,7 +1943,7 @@ var _ = Describe("RBD", func() { f, defaultSCName, nil, - map[string]string{"encrypted": "true", "encryptionType": encType}, + map[string]string{"encrypted": "true", "encryptionType": util.EncryptionTypeString(encType)}, deletePolicy) if err != nil { e2elog.Failf("failed to create storageclass: %v", err) @@ -1962,7 +1966,7 @@ var _ = Describe("RBD", func() { }) ByFileAndBlockEncryption("Resize Encrypted Block PVC and check Device size", func( - validator encryptionValidateFunc, _ validateFunc, encType string, + validator encryptionValidateFunc, _ validateFunc, encType util.EncryptionType, ) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { @@ -1973,7 +1977,7 @@ var _ = Describe("RBD", func() { f, defaultSCName, nil, - map[string]string{"encrypted": "true", "encryptionType": encType}, + map[string]string{"encrypted": "true", "encryptionType": util.EncryptionTypeString(encType)}, deletePolicy) if err != nil { e2elog.Failf("failed to create storageclass: %v", err) @@ -1988,7 +1992,7 @@ var _ = Describe("RBD", func() { validateRBDImageCount(f, 0, defaultRBDPool) validateOmapCount(f, 0, rbdType, defaultRBDPool, volumesType) - if encType != "file" { + if encType != util.EncryptionTypeFile { // Block PVC resize err = resizePVCAndValidateSize(rawPvcPath, rawAppPath, f) if err != nil { @@ -2009,7 +2013,7 @@ var _ = Describe("RBD", func() { }) ByFileAndBlockEncryption("create a PVC and bind it to an app with encrypted RBD volume with VaultKMS", func( - validator encryptionValidateFunc, _ validateFunc, encType string, + validator encryptionValidateFunc, _ validateFunc, encType util.EncryptionType, ) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { @@ -2018,7 
+2022,7 @@ var _ = Describe("RBD", func() { scOpts := map[string]string{ "encrypted": "true", "encryptionKMSID": "vault-test", - "encryptionType": encType, + "encryptionType": util.EncryptionTypeString(encType), } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { @@ -2042,7 +2046,7 @@ var _ = Describe("RBD", func() { }) ByFileAndBlockEncryption("create a PVC and bind it to an app with encrypted RBD volume with VaultTokensKMS", func( - validator encryptionValidateFunc, _ validateFunc, encType string, + validator encryptionValidateFunc, _ validateFunc, encType util.EncryptionType, ) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { @@ -2051,7 +2055,7 @@ var _ = Describe("RBD", func() { scOpts := map[string]string{ "encrypted": "true", "encryptionKMSID": "vault-tokens-test", - "encryptionType": encType, + "encryptionType": util.EncryptionTypeString(encType), } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { @@ -2096,7 +2100,7 @@ var _ = Describe("RBD", func() { }) ByFileAndBlockEncryption("create a PVC and bind it to an app with encrypted RBD volume with VaultTenantSA KMS", func( - validator encryptionValidateFunc, _ validateFunc, encType string, + validator encryptionValidateFunc, _ validateFunc, encType util.EncryptionType, ) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { @@ -2105,7 +2109,7 @@ var _ = Describe("RBD", func() { scOpts := map[string]string{ "encrypted": "true", "encryptionKMSID": "vault-tenant-sa-test", - "encryptionType": encType, + "encryptionType": util.EncryptionTypeString(encType), } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { @@ -2136,7 +2140,7 @@ var _ = Describe("RBD", func() { }) ByFileAndBlockEncryption("create a PVC and bind it to an app with encrypted RBD volume with SecretsMetadataKMS", - func(validator encryptionValidateFunc, _ validateFunc, encType string) { + func(validator encryptionValidateFunc, _ validateFunc, encType util.EncryptionType) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass: %v", err) @@ -2144,7 +2148,7 @@ var _ = Describe("RBD", func() { scOpts := map[string]string{ "encrypted": "true", "encryptionKMSID": "secrets-metadata-test", - "encryptionType": encType, + "encryptionType": util.EncryptionTypeString(encType), } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { @@ -2168,7 +2172,7 @@ var _ = Describe("RBD", func() { }) ByFileAndBlockEncryption("test RBD volume encryption with user secrets based SecretsMetadataKMS", func( - validator encryptionValidateFunc, _ validateFunc, encType string, + validator encryptionValidateFunc, _ validateFunc, encType util.EncryptionType, ) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { @@ -2177,7 +2181,7 @@ var _ = Describe("RBD", func() { scOpts := map[string]string{ "encrypted": "true", "encryptionKMSID": "user-ns-secrets-metadata-test", - "encryptionType": encType, + "encryptionType": util.EncryptionTypeString(encType), } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { @@ -2223,7 +2227,7 @@ var _ = Describe("RBD", func() { ByFileAndBlockEncryption( "test RBD volume encryption with user secrets based SecretsMetadataKMS with tenant namespace", - func(validator encryptionValidateFunc, 
isEncryptedPVC validateFunc, encType string) { + func(validator encryptionValidateFunc, isEncryptedPVC validateFunc, encType util.EncryptionType) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass: %v", err) @@ -2231,7 +2235,7 @@ var _ = Describe("RBD", func() { scOpts := map[string]string{ "encrypted": "true", "encryptionKMSID": "user-secrets-metadata-test", - "encryptionType": encType, + "encryptionType": util.EncryptionTypeString(encType), } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { @@ -2349,7 +2353,7 @@ var _ = Describe("RBD", func() { }) ByFileAndBlockEncryption("create an encrypted PVC snapshot and restore it for an app with VaultKMS", func( - validator encryptionValidateFunc, isEncryptedPVC validateFunc, encType string, + validator encryptionValidateFunc, isEncryptedPVC validateFunc, encType util.EncryptionType, ) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { @@ -2358,7 +2362,7 @@ var _ = Describe("RBD", func() { scOpts := map[string]string{ "encrypted": "true", "encryptionKMSID": "vault-test", - "encryptionType": encType, + "encryptionType": util.EncryptionTypeString(encType), } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { @@ -2382,7 +2386,7 @@ var _ = Describe("RBD", func() { }) ByFileAndBlockEncryption("Validate PVC restore from vaultKMS to vaultTenantSAKMS", func( - validator encryptionValidateFunc, isEncryptedPVC validateFunc, encType string, + validator encryptionValidateFunc, isEncryptedPVC validateFunc, encType util.EncryptionType, ) { restoreSCName := "restore-sc" err := deleteResource(rbdExamplePath + "storageclass.yaml") @@ -2392,7 +2396,7 @@ var _ = Describe("RBD", func() { scOpts := map[string]string{ "encrypted": "true", "encryptionKMSID": "vault-test", - "encryptionType": encType, + "encryptionType": util.EncryptionTypeString(encType), } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { @@ -2402,7 +2406,7 @@ var _ = Describe("RBD", func() { scOpts = map[string]string{ "encrypted": "true", "encryptionKMSID": "vault-tenant-sa-test", - "encryptionType": encType, + "encryptionType": util.EncryptionTypeString(encType), } err = createRBDStorageClass(f.ClientSet, f, restoreSCName, nil, scOpts, deletePolicy) if err != nil { @@ -2442,7 +2446,7 @@ var _ = Describe("RBD", func() { }) ByFileAndBlockEncryption("Validate PVC-PVC clone with different SC from vaultKMS to vaultTenantSAKMS", func( - validator encryptionValidateFunc, isValidPVC validateFunc, encType string, + validator encryptionValidateFunc, isValidPVC validateFunc, encType util.EncryptionType, ) { restoreSCName := "restore-sc" err := deleteResource(rbdExamplePath + "storageclass.yaml") @@ -2452,7 +2456,7 @@ var _ = Describe("RBD", func() { scOpts := map[string]string{ "encrypted": "true", "encryptionKMSID": "vault-test", - "encryptionType": encType, + "encryptionType": util.EncryptionTypeString(encType), } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { @@ -2462,7 +2466,7 @@ var _ = Describe("RBD", func() { scOpts = map[string]string{ "encrypted": "true", "encryptionKMSID": "vault-tenant-sa-test", - "encryptionType": encType, + "encryptionType": util.EncryptionTypeString(encType), } err = createRBDStorageClass(f.ClientSet, f, restoreSCName, nil, scOpts, deletePolicy) if err != nil { @@ -2506,7 
+2510,7 @@ var _ = Describe("RBD", func() { }) ByFileAndBlockEncryption("create an encrypted PVC-PVC clone and bind it to an app", func( - validator encryptionValidateFunc, isValidPVC validateFunc, encType string, + validator encryptionValidateFunc, isValidPVC validateFunc, encType util.EncryptionType, ) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { @@ -2515,7 +2519,7 @@ var _ = Describe("RBD", func() { scOpts := map[string]string{ "encrypted": "true", "encryptionKMSID": "secrets-metadata-test", - "encryptionType": encType, + "encryptionType": util.EncryptionTypeString(encType), } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { @@ -2544,7 +2548,7 @@ var _ = Describe("RBD", func() { }) ByFileAndBlockEncryption("create an encrypted PVC-PVC clone and bind it to an app with VaultKMS", func( - validator encryptionValidateFunc, isValidPVC validateFunc, encType string, + validator encryptionValidateFunc, isValidPVC validateFunc, encType util.EncryptionType, ) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { @@ -2553,7 +2557,7 @@ var _ = Describe("RBD", func() { scOpts := map[string]string{ "encrypted": "true", "encryptionKMSID": "vault-test", - "encryptionType": encType, + "encryptionType": util.EncryptionTypeString(encType), } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { @@ -4044,12 +4048,12 @@ var _ = Describe("RBD", func() { }) ByFileAndBlockEncryption("restore snapshot to bigger size encrypted PVC with VaultKMS", func( - _ encryptionValidateFunc, _ validateFunc, encType string, + _ encryptionValidateFunc, _ validateFunc, encType util.EncryptionType, ) { scOpts := map[string]string{ "encrypted": "true", "encryptionKMSID": "vault-test", - "encryptionType": encType, + "encryptionType": util.EncryptionTypeString(encType), } err := createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { @@ -4081,7 +4085,7 @@ var _ = Describe("RBD", func() { if err != nil { e2elog.Failf("failed to validate restore bigger size clone: %v", err) } - if encType != "file" { + if encType != util.EncryptionTypeFile { // validate block mode PVC err = validateBiggerPVCFromSnapshot(f, rawPvcPath, @@ -4107,11 +4111,11 @@ var _ = Describe("RBD", func() { By("clone PVC to a bigger size PVC", func() { ByFileAndBlockEncryption("clone PVC to bigger size encrypted PVC with VaultKMS", func( - validator encryptionValidateFunc, _ validateFunc, encType string, + validator encryptionValidateFunc, _ validateFunc, encType util.EncryptionType, ) { scOpts := map[string]string{ "encrypted": "true", - "encryptionType": encType, + "encryptionType": util.EncryptionTypeString(encType), "encryptionKMSID": "vault-test", } err := createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) @@ -4134,7 +4138,7 @@ var _ = Describe("RBD", func() { if err != nil { e2elog.Failf("failed to validate bigger size clone: %v", err) } - if encType != "file" { + if encType != util.EncryptionTypeFile { // validate block mode PVC err = validateBiggerCloneFromPVC(f, rawPvcPath, From 7fa6046522853674fac6177ffa1fc0aba4c76725 Mon Sep 17 00:00:00 2001 From: Marcel Lauhoff Date: Thu, 18 Aug 2022 18:07:08 +0200 Subject: [PATCH 30/35] kms: Add GetSecret() to KMIP KMS Signed-off-by: Marcel Lauhoff --- internal/kms/kmip.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/internal/kms/kmip.go b/internal/kms/kmip.go index 
250d8f1dbcb..f0e1fac2bbc 100644 --- a/internal/kms/kmip.go +++ b/internal/kms/kmip.go @@ -500,6 +500,10 @@ func (kms *kmipKMS) verifyResponse( return &batchItem, nil } +func (kms *kmipKMS) GetSecret(volumeID string) (string, error) { + return "", ErrGetSecretUnsupported +} + // TODO: use the following structs from https://github.com/gemalto/kmip-go // when https://github.com/ThalesGroup/kmip-go/issues/21 is resolved. // refer: https://docs.oasis-open.org/kmip/spec/v1.4/kmip-spec-v1.4.html. From 829414cd7c2e16c618c86273cc65a8a1633b2529 Mon Sep 17 00:00:00 2001 From: Marcel Lauhoff Date: Fri, 26 Aug 2022 19:57:12 +0200 Subject: [PATCH 31/35] scripts: Add env to set minikube iso url Make iso url configurable to use pre-release minikube images or local-built (file://) Signed-off-by: Marcel Lauhoff --- scripts/minikube.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/scripts/minikube.sh b/scripts/minikube.sh index 1def9a36968..e864f95b70a 100755 --- a/scripts/minikube.sh +++ b/scripts/minikube.sh @@ -180,6 +180,7 @@ function disable_storage_addons() { # configure minikube MINIKUBE_ARCH=${MINIKUBE_ARCH:-"amd64"} MINIKUBE_VERSION=${MINIKUBE_VERSION:-"latest"} +MINIKUBE_ISO_URL=${MINIKUBE_ISO_URL:-""} KUBE_VERSION=${KUBE_VERSION:-"latest"} CONTAINER_CMD=${CONTAINER_CMD:-"docker"} MEMORY=${MEMORY:-"4096"} @@ -206,6 +207,10 @@ else DISK_CONFIG="" fi +if [[ -n "${MINIKUBE_ISO_URL}" ]]; then + EXTRA_CONFIG="${EXTRA_CONFIG} --iso-url ${MINIKUBE_ISO_URL}" +fi + # configure csi image version CSI_IMAGE_VERSION=${CSI_IMAGE_VERSION:-"canary"} From 12bd495d421ffd36c8a80db4f642537bf347d9a6 Mon Sep 17 00:00:00 2001 From: Marcel Lauhoff Date: Tue, 6 Sep 2022 18:44:00 +0200 Subject: [PATCH 32/35] util: Add EncryptionTypeNone and unit tests Add type none to distinguish disabled encryption (positive result) from invalid configuration (negative result). Signed-off-by: Marcel Lauhoff --- internal/journal/voljournal.go | 8 +++++--- internal/util/crypto.go | 34 ++++++++++++++++++++++++++-------- internal/util/crypto_test.go | 31 +++++++++++++++++++++++++++++++ 3 files changed, 62 insertions(+), 11 deletions(-) diff --git a/internal/journal/voljournal.go b/internal/journal/voljournal.go index 988bf71cc3b..286d84658c7 100644 --- a/internal/journal/voljournal.go +++ b/internal/journal/voljournal.go @@ -383,11 +383,13 @@ func (conn *Connection) CheckReservation(ctx context.Context, } } - if encryptionType != util.EncryptionTypeInvalid { + if encryptionType != util.EncryptionTypeNone { if savedImageAttributes.EncryptionType != encryptionType { return nil, fmt.Errorf("internal state inconsistent, omap encryption type"+ - " mismatch, request KMS (%s) volume UUID (%s) volume omap KMS (%d)", - kmsConfig, objUUID, savedImageAttributes.EncryptionType) + " mismatch, request type %q(%d) volume UUID (%s) volume omap encryption type %q (%d)", + util.EncryptionTypeString(encryptionType), encryptionType, + objUUID, util.EncryptionTypeString(savedImageAttributes.EncryptionType), + savedImageAttributes.EncryptionType) } } diff --git a/internal/util/crypto.go b/internal/util/crypto.go index 2489ab01470..9eb67a1db31 100644 --- a/internal/util/crypto.go +++ b/internal/util/crypto.go @@ -83,17 +83,29 @@ func FetchEncryptionKMSID(encrypted, kmsID string) (string, error) { type EncryptionType int const ( + // EncryptionTypeInvalid signals invalid or unsupported configuration. EncryptionTypeInvalid EncryptionType = iota - EncryptionTypeBlock = iota - EncryptionTypeFile = iota + // EncryptionTypeNone disables encryption. 
+ EncryptionTypeNone + // EncryptionTypeBlock enables block encryption. + EncryptionTypeBlock + // EncryptionTypeBlock enables file encryption (fscrypt). + EncryptionTypeFile +) + +const ( + encryptionTypeBlockString = "block" + encryptionTypeFileString = "file" ) func ParseEncryptionType(typeStr string) EncryptionType { switch typeStr { - case "block": + case encryptionTypeBlockString: return EncryptionTypeBlock - case "file": + case encryptionTypeFileString: return EncryptionTypeFile + case "": + return EncryptionTypeNone default: return EncryptionTypeInvalid } @@ -102,13 +114,15 @@ func ParseEncryptionType(typeStr string) EncryptionType { func EncryptionTypeString(encType EncryptionType) string { switch encType { case EncryptionTypeBlock: - return "block" + return encryptionTypeBlockString case EncryptionTypeFile: - return "file" - case EncryptionTypeInvalid: + return encryptionTypeFileString + case EncryptionTypeNone: return "" + case EncryptionTypeInvalid: + return "INVALID" default: - return "" + return "UNKNOWN" } } @@ -121,6 +135,10 @@ func FetchEncryptionType(volOptions map[string]string, fallback EncryptionType) return fallback } + if encType == "" { + return EncryptionTypeInvalid + } + return ParseEncryptionType(encType) } diff --git a/internal/util/crypto_test.go b/internal/util/crypto_test.go index a5bb49da617..f4f0f57160f 100644 --- a/internal/util/crypto_test.go +++ b/internal/util/crypto_test.go @@ -63,3 +63,34 @@ func TestKMSWorkflow(t *testing.T) { assert.NoError(t, err) assert.Equal(t, secrets["encryptionPassphrase"], passphrase) } + +func TestEncryptionType(t *testing.T) { + t.Parallel() + assert.EqualValues(t, EncryptionTypeInvalid, ParseEncryptionType("wat?")) + assert.EqualValues(t, EncryptionTypeInvalid, ParseEncryptionType("both")) + assert.EqualValues(t, EncryptionTypeInvalid, ParseEncryptionType("file,block")) + assert.EqualValues(t, EncryptionTypeInvalid, ParseEncryptionType("block,file")) + assert.EqualValues(t, EncryptionTypeBlock, ParseEncryptionType("block")) + assert.EqualValues(t, EncryptionTypeFile, ParseEncryptionType("file")) + assert.EqualValues(t, EncryptionTypeNone, ParseEncryptionType("")) + + for _, s := range []string{"file", "block", ""} { + assert.EqualValues(t, s, EncryptionTypeString(ParseEncryptionType(s))) + } +} + +func TestFetchEncryptionType(t *testing.T) { + t.Parallel() + volOpts := map[string]string{} + assert.EqualValues(t, EncryptionTypeBlock, FetchEncryptionType(volOpts, EncryptionTypeBlock)) + assert.EqualValues(t, EncryptionTypeFile, FetchEncryptionType(volOpts, EncryptionTypeFile)) + assert.EqualValues(t, EncryptionTypeNone, FetchEncryptionType(volOpts, EncryptionTypeNone)) + volOpts["encryptionType"] = "" + assert.EqualValues(t, EncryptionTypeInvalid, FetchEncryptionType(volOpts, EncryptionTypeNone)) + volOpts["encryptionType"] = "block" + assert.EqualValues(t, EncryptionTypeBlock, FetchEncryptionType(volOpts, EncryptionTypeNone)) + volOpts["encryptionType"] = "file" + assert.EqualValues(t, EncryptionTypeFile, FetchEncryptionType(volOpts, EncryptionTypeNone)) + volOpts["encryptionType"] = "INVALID" + assert.EqualValues(t, EncryptionTypeInvalid, FetchEncryptionType(volOpts, EncryptionTypeNone)) +} From 8a23075331f1803e21c8d7fd177242b9d0c7da32 Mon Sep 17 00:00:00 2001 From: Marcel Lauhoff Date: Tue, 6 Sep 2022 18:46:56 +0200 Subject: [PATCH 33/35] rbd: Use EncryptionTypeNone Signed-off-by: Marcel Lauhoff --- internal/rbd/encryption.go | 6 +++--- internal/rbd/rbd_journal.go | 6 +++--- 2 files changed, 6 insertions(+), 6 
deletions(-) diff --git a/internal/rbd/encryption.go b/internal/rbd/encryption.go index c7211328819..ea65f14aa87 100644 --- a/internal/rbd/encryption.go +++ b/internal/rbd/encryption.go @@ -309,8 +309,6 @@ func (ri *rbdImage) initKMS(ctx context.Context, volOptions, credentials map[str kmsID, encType, err := ParseEncryptionOpts(ctx, volOptions, rbdDefaultEncryptionType) if err != nil { return err - } else if kmsID == "" { - return nil } switch encType { @@ -320,6 +318,8 @@ func (ri *rbdImage) initKMS(ctx context.Context, volOptions, credentials map[str err = ri.configureFileEncryption(kmsID, credentials) case util.EncryptionTypeInvalid: return fmt.Errorf("invalid encryption type") + case util.EncryptionTypeNone: + return nil } if err != nil { @@ -342,7 +342,7 @@ func ParseEncryptionOpts( ) encrypted, ok = volOptions["encrypted"] if !ok { - return "", util.EncryptionTypeInvalid, err + return "", util.EncryptionTypeNone, nil } kmsID, err = util.FetchEncryptionKMSID(encrypted, volOptions["encryptionKMSID"]) if err != nil { diff --git a/internal/rbd/rbd_journal.go b/internal/rbd/rbd_journal.go index d57203bf523..42e504d7103 100644 --- a/internal/rbd/rbd_journal.go +++ b/internal/rbd/rbd_journal.go @@ -94,7 +94,7 @@ func getEncryptionConfig(rbdVol *rbdVolume) (string, util.EncryptionType) { case rbdVol.isFileEncrypted(): return rbdVol.fileEncryption.GetID(), util.EncryptionTypeFile default: - return "", util.EncryptionTypeInvalid + return "", util.EncryptionTypeNone } } @@ -141,7 +141,7 @@ func checkSnapCloneExists( defer j.Destroy() snapData, err := j.CheckReservation(ctx, rbdSnap.JournalPool, - rbdSnap.RequestName, rbdSnap.NamePrefix, rbdSnap.RbdImageName, "", util.EncryptionTypeInvalid) + rbdSnap.RequestName, rbdSnap.NamePrefix, rbdSnap.RbdImageName, "", util.EncryptionTypeNone) if err != nil { return false, err } @@ -572,7 +572,7 @@ func RegenerateJournal( rbdVol.Owner = owner - kmsID, encryptionType, err = ParseEncryptionOpts(ctx, volumeAttributes, util.EncryptionTypeInvalid) + kmsID, encryptionType, err = ParseEncryptionOpts(ctx, volumeAttributes, util.EncryptionTypeNone) if err != nil { return "", err } From 1dff9a6e6d1bbe2059f214b86ac79d18d4cd92d9 Mon Sep 17 00:00:00 2001 From: Marcel Lauhoff Date: Wed, 20 Jul 2022 21:52:22 +0200 Subject: [PATCH 34/35] cephfs: Add placeholder journal fscrypt support Signed-off-by: Marcel Lauhoff --- internal/cephfs/store/fsjournal.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/internal/cephfs/store/fsjournal.go b/internal/cephfs/store/fsjournal.go index d2a7e067812..656304efe9c 100644 --- a/internal/cephfs/store/fsjournal.go +++ b/internal/cephfs/store/fsjournal.go @@ -90,7 +90,7 @@ func CheckVolExists(ctx context.Context, defer j.Destroy() imageData, err := j.CheckReservation( - ctx, volOptions.MetadataPool, volOptions.RequestName, volOptions.NamePrefix, "", "") + ctx, volOptions.MetadataPool, volOptions.RequestName, volOptions.NamePrefix, "", "", util.EncryptionTypeNone) if err != nil { return nil, err } @@ -278,7 +278,7 @@ func ReserveVol(ctx context.Context, volOptions *VolumeOptions, secret map[strin imageUUID, vid.FsSubvolName, err = j.ReserveName( ctx, volOptions.MetadataPool, util.InvalidPoolID, volOptions.MetadataPool, util.InvalidPoolID, volOptions.RequestName, - volOptions.NamePrefix, "", "", volOptions.ReservedID, "", volOptions.BackingSnapshotID) + volOptions.NamePrefix, "", "", volOptions.ReservedID, "", volOptions.BackingSnapshotID, util.EncryptionTypeNone) if err != nil { return nil, err } @@ -321,7 +321,7 
@@ func ReserveSnap( imageUUID, vid.FsSnapshotName, err = j.ReserveName( ctx, volOptions.MetadataPool, util.InvalidPoolID, volOptions.MetadataPool, util.InvalidPoolID, snap.RequestName, - snap.NamePrefix, parentSubVolName, "", snap.ReservedID, "", "") + snap.NamePrefix, parentSubVolName, "", snap.ReservedID, "", "", util.EncryptionTypeNone) if err != nil { return nil, err } @@ -390,7 +390,7 @@ func CheckSnapExists( defer j.Destroy() snapData, err := j.CheckReservation( - ctx, volOptions.MetadataPool, snap.RequestName, snap.NamePrefix, volOptions.VolID, "") + ctx, volOptions.MetadataPool, snap.RequestName, snap.NamePrefix, volOptions.VolID, "", util.EncryptionTypeNone) if err != nil { return nil, nil, err } From eccb1a7fc911ee6f3a604ceeebe0977b03b237d5 Mon Sep 17 00:00:00 2001 From: Marcel Lauhoff Date: Fri, 14 Oct 2022 19:01:47 +0200 Subject: [PATCH 35/35] e2e: Feature flag RBD fscrypt tests (default disabled) Add test-rbd-fscrypt feature flag to e2e suite. Default disabled as the current CI system's kernel doesn't have the required features enabled. Signed-off-by: Marcel Lauhoff --- e2e/e2e_test.go | 1 + e2e/rbd.go | 5 +++++ e2e/utils.go | 1 + 3 files changed, 7 insertions(+) diff --git a/e2e/e2e_test.go b/e2e/e2e_test.go index 338a776b6db..40745d8b5ad 100644 --- a/e2e/e2e_test.go +++ b/e2e/e2e_test.go @@ -39,6 +39,7 @@ func init() { flag.BoolVar(&deployNFS, "deploy-nfs", false, "deploy nfs csi driver") flag.BoolVar(&testCephFS, "test-cephfs", true, "test cephFS csi driver") flag.BoolVar(&testRBD, "test-rbd", true, "test rbd csi driver") + flag.BoolVar(&testRBDFSCrypt, "test-rbd-fscrypt", false, "test rbd csi driver fscrypt support") flag.BoolVar(&testNBD, "test-nbd", false, "test rbd csi driver with rbd-nbd mounter") flag.BoolVar(&testNFS, "test-nfs", false, "test nfs csi driver") flag.BoolVar(&helmTest, "helm-test", false, "tests running on deployment via helm") diff --git a/e2e/rbd.go b/e2e/rbd.go index fc2feabdb01..649fd5d9390 100644 --- a/e2e/rbd.go +++ b/e2e/rbd.go @@ -241,6 +241,11 @@ func ByFileAndBlockEncryption( callback(validateEncryptedPVCAndAppBinding, isBlockEncryptedPVC, util.EncryptionTypeBlock) }) By(text+" (file)", func() { + if !testRBDFSCrypt { + e2elog.Logf("skipping RBD fscrypt file encryption test") + + return + } callback(validateEncryptedFilesystemAndAppBinding, isFileEncryptedPVC, util.EncryptionTypeFile) }) } diff --git a/e2e/utils.go b/e2e/utils.go index f844bfae684..4e259805fbe 100644 --- a/e2e/utils.go +++ b/e2e/utils.go @@ -85,6 +85,7 @@ var ( deployNFS bool testCephFS bool testRBD bool + testRBDFSCrypt bool testNBD bool testNFS bool helmTest bool
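A note on usage: with the flag wired up as above, the file-encryption leg of ByFileAndBlockEncryption is skipped unless the e2e suite is started with -test-rbd-fscrypt=true, for example roughly along the lines of

	go test ./e2e -test-rbd=true -test-rbd-fscrypt=true <remaining e2e and kubeconfig flags for the local cluster>

The exact invocation depends on the local harness and cluster configuration; on CI, and on kernels without the required fscrypt support, the flag stays at its default (false), so only the block-encryption variants run.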