From 2d896c5cfa03cfa3e425ece86077db7d975b4c4e Mon Sep 17 00:00:00 2001 From: Hamza El-Saawy Date: Tue, 7 Nov 2023 13:06:12 -0500 Subject: [PATCH] Add WCOW and vSMB functional tests Update and un-skip WCOW uVM and container tests (and add WCOW uVM benchmarks), as well as WCOW vSMB and LCOW boto files tests. Add WCOW host process tests, including dedicated tests for setting username, and verifying hostname and volume mounts. Moved: - `lcow_bench_test.go` to `uvm_bench_test.go` - `lcow_container_test.go` to `container_test.go` - `lcow_test.go` to `lcow_uvm_test.go` and `uvm_test.go` Fix bug where removing a direct-mapped vSMB share fails. Run (non-virtualization/uVM) functional tests within CI. Make sure container specs are created with the default working directory (`C:\`), similar to how `internal\cmd` works). Relies on PR: https://github.com/microsoft/hcsshim/pull/1974 Signed-off-by: Hamza El-Saawy --- .github/workflows/ci.yml | 51 +- internal/jobcontainers/jobcontainer.go | 18 +- internal/jobcontainers/storage.go | 29 +- internal/uvm/vsmb.go | 25 +- test/functional/container_test.go | 665 ++++++++++++++++++ test/functional/hostprocess_test.go | 400 +++++++++++ test/functional/lcow_bench_test.go | 106 --- test/functional/lcow_container_test.go | 169 ----- test/functional/lcow_test.go | 304 -------- test/functional/lcow_uvm_test.go | 271 +++++++ test/functional/main_test.go | 9 + test/functional/uvm_bench_test.go | 123 ++++ test/functional/uvm_test.go | 71 ++ test/functional/uvm_vsmb_test.go | 199 ++++-- test/functional/wcow_container_bench_test.go | 421 +++++++++++ .../{wcow_test.go => wcow_uvm_test.go} | 0 test/internal/cmd/cmd.go | 1 + test/internal/cmd/io.go | 46 +- test/internal/oci/oci.go | 42 ++ test/pkg/uvm/uvm.go | 32 + 20 files changed, 2317 insertions(+), 665 deletions(-) create mode 100644 test/functional/container_test.go create mode 100644 test/functional/hostprocess_test.go delete mode 100644 test/functional/lcow_bench_test.go delete mode 100644 test/functional/lcow_container_test.go delete mode 100644 test/functional/lcow_test.go create mode 100644 test/functional/lcow_uvm_test.go create mode 100644 test/functional/uvm_bench_test.go create mode 100644 test/functional/uvm_test.go create mode 100644 test/functional/wcow_container_bench_test.go rename test/functional/{wcow_test.go => wcow_uvm_test.go} (100%) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c4923f8b84..e9d32cda58 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -325,6 +325,30 @@ jobs: - name: Install gotestsum run: go install gotest.tools/gotestsum@${{ env.GOTESTSUM_VERSION }} + # Download PsExec so we can run (functional) tests as 'NT Authority\System'. + # Needed for hostprocess tests, as well ensuring backup and restore privileges for + # unpacking WCOW images. 
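+      # The workspace 'bin' directory is appended to GITHUB_PATH so psexec.exe is available to the
+      # later test steps, and the Sysinternals EULA is accepted up front so those runs don't prompt.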
+ - name: Install PsExec.exe + run: | + New-Item -ItemType Directory -Force '${{ github.workspace }}\bin' > $null + '${{ github.workspace }}\bin' | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append + + curl.exe -L --no-progress-meter --fail-with-body -o 'C:\PSTools.zip' ` + 'https://download.sysinternals.com/files/PSTools.zip' 2>&1 + if ( $LASTEXITCODE ) { + Write-Output '::error::Could not download PSTools.zip' + exit $LASTEXITCODE + } + + tar.exe xf 'C:\PSTools.zip' -C '${{ github.workspace }}\bin' 'PsExec*' 2>&1 + if ( $LASTEXITCODE ) { + Write-Output '::error::Could not extract PsExec.exe' + exit $LASTEXITCODE + } + + # accept the eula + & '${{ github.workspace }}/bin/psexec' -accepteula -nobanner cmd /c "exit 0" 2>$null + # run tests - name: Test repo run: ${{ env.GOTESTSUM_CMD }} -gcflags=all=-d=checkptr -tags admin -timeout=20m ./... @@ -354,13 +378,34 @@ jobs: ${{ env.GOTESTSUM_CMD_RAW }} ./containerd-shim-runhcs-v1.test.exe '-test.v' working-directory: test + - name: Build and run functional testing binary + run: | + ${{ env.GO_BUILD_TEST_CMD }} ./functional + if ( $LASTEXITCODE ) { + Write-Output '::error::Could not build functional.test.exe' + exit $LASTEXITCODE + } + + # PsExec doesn't load GOBIN into path, so resolve gotestsum path + # don't run uVM (ie, nested virt) or LCOW integrity tests + $cmd = '${{ env.GOTESTSUM_CMD_RAW }} ./functional.test.exe -exclude="LCOW,LCOWIntegrity,uVM" -test.timeout=1h -test.v' + $cmd = $cmd -replace 'gotestsum', ((Get-Command gotestsum)[0].Source) + Write-Host "gotestsum command: $cmd" + + # it appears, that in a GH runner, PsExec always runs noninteractively (even with `-i`) and + # doesn't capture or redirect std IO. + # Instead, write stdout/stderr to a file. + psexec -nobanner -w (Get-Location) -s cmd /c "$cmd > out.txt 2>&1" + $ec = $LASTEXITCODE + Get-Content out.txt + + exit $ec + working-directory: test + # build testing binaries - name: Build cri-containerd Testing Binary run: ${{ env.GO_BUILD_TEST_CMD }} ./cri-containerd working-directory: test - - name: Build functional Testing Binary - run: ${{ env.GO_BUILD_TEST_CMD }} ./functional - working-directory: test - name: Build runhcs Testing Binary run: ${{ env.GO_BUILD_TEST_CMD }} ./runhcs working-directory: test diff --git a/internal/jobcontainers/jobcontainer.go b/internal/jobcontainers/jobcontainer.go index 05b66703f2..4fb3fd3493 100644 --- a/internal/jobcontainers/jobcontainer.go +++ b/internal/jobcontainers/jobcontainer.go @@ -30,11 +30,6 @@ import ( "golang.org/x/sys/windows" ) -var ( - fileBindingSupport bool - checkBindSupportOnce sync.Once -) - const ( // jobContainerNameFmt is the naming format that job objects for job containers will follow. jobContainerNameFmt = "JobContainer_%s" @@ -181,15 +176,8 @@ func Create(ctx context.Context, id string, s *specs.Spec) (_ cow.Container, _ * // show up at beforehand as you would need to know the containers ID before you launched it. Now that the // rootfs location can be static, a user can easily supply C:\hpc\rest\of\path as their work dir and still // supply anything outside of C:\hpc if they want another location on the host. 
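+	// (The bindflt availability check itself now lives in FileBindingSupported, in storage.go,
+	// backed by sync.OnceValue, so the call sites below all share a single cached result.)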
- checkBindSupportOnce.Do(func() { - bindDLL := `C:\windows\system32\bindfltapi.dll` - if _, err := os.Stat(bindDLL); err == nil { - fileBindingSupport = true - } - }) - var closer resources.ResourceCloser - if fileBindingSupport { + if FileBindingSupported() { closer, err = container.bindSetup(ctx, s) } else { closer, err = container.fallbackSetup(ctx, s) @@ -254,7 +242,7 @@ func (c *JobContainer) CreateProcess(ctx context.Context, config interface{}) (_ // If the working directory was changed, that means the user supplied %CONTAINER_SANDBOX_MOUNT_POINT%\\my\dir or something similar. // In that case there's nothing left to do, as we don't want to join it with the mount point again.. If it *wasn't* changed, and there's // no bindflt support then we need to join it with the mount point, as it's some normal path. - if !changed && !fileBindingSupport { + if !changed && !FileBindingSupported() { workDir = filepath.Join(c.rootfsLocation, removeDriveLetter(workDir)) } } @@ -335,7 +323,7 @@ func (c *JobContainer) CreateProcess(ctx context.Context, config interface{}) (_ // (cmd in this case) after launch can now see C:\ as it's in the silo. We could // also add a new mode/flag for the shim where it's just a dummy process launcher, so we can invoke // the shim instead of cmd and have more control over things. - if fileBindingSupport { + if FileBindingSupported() { commandLine = "cmd /c " + commandLine } diff --git a/internal/jobcontainers/storage.go b/internal/jobcontainers/storage.go index 180c27a862..b38c9fca81 100644 --- a/internal/jobcontainers/storage.go +++ b/internal/jobcontainers/storage.go @@ -8,22 +8,24 @@ import ( "os" "path/filepath" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "github.com/Microsoft/hcsshim/internal/layers" "github.com/Microsoft/hcsshim/internal/log" "github.com/Microsoft/hcsshim/internal/resources" + "github.com/Microsoft/hcsshim/internal/sync" "github.com/Microsoft/hcsshim/internal/wclayer" - specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" ) // fallbackRootfsFormat is the fallback location for the rootfs if file binding support isn't available. // %s will be expanded with the container ID. Trailing backslash required for SetVolumeMountPoint and -// DeleteVolumeMountPoint +// DeleteVolumeMountPoint. const fallbackRootfsFormat = `C:\hpc\%s\` // defaultSiloRootfsLocation is the default location the rootfs for the container will show up // inside of a given silo. If bind filter support isn't available the rootfs will be -// C:\hpc\ +// C:\hpc\. const defaultSiloRootfsLocation = `C:\hpc\` func (c *JobContainer) mountLayers(ctx context.Context, containerID string, s *specs.Spec, volumeMountPath string) (_ resources.ResourceCloser, err error) { @@ -72,3 +74,22 @@ func (c *JobContainer) setupRootfsBinding(root, target string) error { } return nil } + +var fileBindingSupportedOnce = sync.OnceValue(func() (bool, error) { + // TODO: use windows.NewLazySystemDLL("bindfltapi.dll").Load() (or windows.LoadLibraryEx directly) + + root := os.Getenv("SystemRoot") + if root == "" { + root = `C:\windows` // shouldn't really need this fall back, but ... 
+	}
+	bindDLL := filepath.Join(root, `system32\bindfltapi.dll`)
+	if _, err := os.Stat(bindDLL); err != nil {
+		return false, err
+	}
+	return true, nil
+})
+
+func FileBindingSupported() bool {
+	b, _ := fileBindingSupportedOnce()
+	return b
+}
diff --git a/internal/uvm/vsmb.go b/internal/uvm/vsmb.go
index e67bc62328..314425702a 100644
--- a/internal/uvm/vsmb.go
+++ b/internal/uvm/vsmb.go
@@ -133,7 +133,12 @@ func forceNoDirectMap(path string) (bool, error) {
 	var info winapi.FILE_ID_INFO
 	// We check for any error, rather than just ERROR_INVALID_PARAMETER. It seems better to also
 	// fall back if e.g. some other backing filesystem is used which returns a different error.
-	if err := windows.GetFileInformationByHandleEx(h, winapi.FileIdInfo, (*byte)(unsafe.Pointer(&info)), uint32(unsafe.Sizeof(info))); err != nil {
+	if err := windows.GetFileInformationByHandleEx(
+		h,
+		winapi.FileIdInfo,
+		(*byte)(unsafe.Pointer(&info)),
+		uint32(unsafe.Sizeof(info)),
+	); err != nil {
 		return true, nil
 	}
 	return false, nil
@@ -282,6 +287,24 @@ func (uvm *UtilityVM) removeVSMB(ctx context.Context, hostPath string, readOnly,
 		return nil
 	}
 
+	// Cannot remove a direct-mapped vSMB share without first closing all open handles to the
+	// share's files from inside the uVM (otherwise, the removal would un-map the files from
+	// the uVM's memory and subsequent accesses would fail).
+	// Rather than forgetting about the share on the host side, keep it (with refCount == 0)
+	// in case that directory is re-added for some reason.
+	//
+	// Note: HCS (vmcompute.exe) issues a remove vSMB request to the guest GCS iff:
+	//  - vmwp.exe direct mapped the vSMB share; and
+	//  - the GCS (on its internal bridge) has the PurgeVSmbCachedHandlesSupported capability.
+	// We do not (currently) have the ability to check for either.
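+	// (NoDirectmap == false means the share could have been direct-mapped by vmwp.exe.)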
+ if !share.options.NoDirectmap { + log.G(ctx).WithFields(logrus.Fields{ + "name": share.name, + "path": hostPath, + }).Debug("skipping remove of directmapped vSMB share") + return nil + } + modification := &hcsschema.ModifySettingRequest{ RequestType: guestrequest.RequestTypeRemove, Settings: hcsschema.VirtualSmbShare{Name: share.name}, diff --git a/test/functional/container_test.go b/test/functional/container_test.go new file mode 100644 index 0000000000..09a7b32d04 --- /dev/null +++ b/test/functional/container_test.go @@ -0,0 +1,665 @@ +//go:build windows && functional +// +build windows,functional + +package functional + +import ( + "context" + "fmt" + "testing" + + ctrdoci "github.com/containerd/containerd/oci" + "golang.org/x/sys/windows" + + "github.com/Microsoft/hcsshim/internal/jobcontainers" + "github.com/Microsoft/hcsshim/osversion" + + testcmd "github.com/Microsoft/hcsshim/test/internal/cmd" + testcontainer "github.com/Microsoft/hcsshim/test/internal/container" + testlayers "github.com/Microsoft/hcsshim/test/internal/layers" + testoci "github.com/Microsoft/hcsshim/test/internal/oci" + "github.com/Microsoft/hcsshim/test/internal/util" + "github.com/Microsoft/hcsshim/test/pkg/require" + testuvm "github.com/Microsoft/hcsshim/test/pkg/uvm" +) + +func TestContainerLifecycle(t *testing.T) { + requireFeatures(t, featureContainer) + requireAnyFeature(t, featureUVM, featureLCOW, featureWCOW, featureHostProcess) + require.Build(t, osversion.RS5) + + ctx := util.Context(namespacedContext(context.Background()), t) + + t.Run("LCOW", func(t *testing.T) { + requireFeatures(t, featureLCOW, featureUVM) + + ls := linuxImageLayers(ctx, t) + vm := testuvm.CreateAndStart(ctx, t, defaultLCOWOptions(ctx, t)) + + scratch, _ := testlayers.ScratchSpace(ctx, t, vm, "", "", "") + cID := vm.ID() + "-container" + spec := testoci.CreateLinuxSpec(ctx, t, cID, + testoci.DefaultLinuxSpecOpts(cID, + ctrdoci.WithProcessArgs("/bin/sh", "-c", testoci.TailNullArgs), + testoci.WithWindowsLayerFolders(append(ls, scratch)))...) + + c, _, cleanup := testcontainer.Create(ctx, t, vm, spec, cID, hcsOwner) + t.Cleanup(cleanup) + + init := testcontainer.Start(ctx, t, c, nil) + t.Cleanup(func() { + testcontainer.Kill(ctx, t, c) + testcontainer.Wait(ctx, t, c) + }) + + testcmd.Kill(ctx, t, init) + testcmd.WaitExitCode(ctx, t, init, testcmd.ForcedKilledExitCode) + }) // LCOW + + t.Run("WCOW Hyper-V", func(t *testing.T) { + requireFeatures(t, featureWCOW, featureUVM) + + ls := windowsImageLayers(ctx, t) + vm := testuvm.CreateAndStart(ctx, t, defaultWCOWOptions(ctx, t)) + + cID := vm.ID() + "-container" + scratch := testlayers.WCOWScratchDir(ctx, t, "") + spec := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine(testoci.PingSelfCmd), + testoci.WithWindowsLayerFolders(append(ls, scratch)), + )...) 
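+		// Unlike the LCOW subtest above, which waits for testcmd.ForcedKilledExitCode, a killed
+		// WCOW init process is expected to exit with windows.ERROR_PROCESS_ABORTED.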
+ + c, _, cleanup := testcontainer.Create(ctx, t, vm, spec, cID, hcsOwner) + t.Cleanup(cleanup) + + init := testcontainer.StartWithSpec(ctx, t, c, spec.Process, nil) + t.Cleanup(func() { + testcontainer.Kill(ctx, t, c) + testcontainer.Wait(ctx, t, c) + }) + + testcmd.Kill(ctx, t, init) + testcmd.WaitExitCode(ctx, t, init, int(windows.ERROR_PROCESS_ABORTED)) + }) // WCOW Hyper-V + + t.Run("WCOW Process", func(t *testing.T) { + requireFeatures(t, featureWCOW) + + cID := testName(t, "container") + scratch := testlayers.WCOWScratchDir(ctx, t, "") + spec := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine(testoci.PingSelfCmd), + testoci.WithWindowsLayerFolders(append(windowsImageLayers(ctx, t), scratch)), + )...) + + c, _, cleanup := testcontainer.Create(ctx, t, nil, spec, cID, hcsOwner) + t.Cleanup(cleanup) + + init := testcontainer.StartWithSpec(ctx, t, c, spec.Process, nil) + t.Cleanup(func() { + testcontainer.Kill(ctx, t, c) + testcontainer.Wait(ctx, t, c) + }) + + testcmd.Kill(ctx, t, init) + testcmd.WaitExitCode(ctx, t, init, int(windows.ERROR_PROCESS_ABORTED)) + }) // WCOW Process + + t.Run("WCOW HostProcess", func(t *testing.T) { + requireFeatures(t, featureWCOW, featureHostProcess) + + cID := testName(t, "container") + scratch := testlayers.WCOWScratchDir(ctx, t, "") + spec := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine(testoci.PingSelfCmd), + testoci.WithWindowsLayerFolders(append(windowsImageLayers(ctx, t), scratch)), + testoci.AsHostProcessContainer(), + testoci.HostProcessInheritUser(), + )...) + + c, _, cleanup := testcontainer.Create(ctx, t, nil, spec, cID, hcsOwner) + t.Cleanup(cleanup) + + if _, ok := c.(*jobcontainers.JobContainer); !ok { + t.Fatalf("expected type JobContainer; got %T", c) + } + + init := testcontainer.StartWithSpec(ctx, t, c, spec.Process, nil) + t.Cleanup(func() { + testcontainer.Kill(ctx, t, c) + testcontainer.Wait(ctx, t, c) + }) + + testcmd.Kill(ctx, t, init) + testcmd.WaitExitCode(ctx, t, init, 1) + }) // WCOW HostProcess +} + +var ioTests = []struct { + name string + lcowArgs []string + wcowCmd string + in string + want string +}{ + { + name: "true", + lcowArgs: []string{"/bin/sh", "-c", "true"}, + wcowCmd: "cmd /c (exit 0)", + want: "", + }, + { + name: "echo", + lcowArgs: []string{"/bin/sh", "-c", `echo -n "hi y'all"`}, + wcowCmd: `cmd /c echo hi y'all`, + want: "hi y'all", + }, + { + name: "tee", + lcowArgs: []string{"/bin/sh", "-c", "tee"}, + wcowCmd: "", // TODO: figure out cmd.exe equivalent + in: "are you copying me?", + want: "are you copying me?", + }, +} + +func TestContainerIO(t *testing.T) { + requireFeatures(t, featureContainer) + requireAnyFeature(t, featureUVM, featureLCOW, featureWCOW, featureHostProcess) + require.Build(t, osversion.RS5) + + ctx := util.Context(namespacedContext(context.Background()), t) + + t.Run("LCOW", func(t *testing.T) { + requireFeatures(t, featureLCOW, featureUVM) + + opts := defaultLCOWOptions(ctx, t) + vm := testuvm.CreateAndStart(ctx, t, opts) + + ls := linuxImageLayers(ctx, t) + cache := testlayers.CacheFile(ctx, t, "") + + for _, tt := range ioTests { + if len(tt.lcowArgs) == 0 { + continue + } + + t.Run(tt.name, func(t *testing.T) { + cID := testName(t, "container") + + scratch, _ := testlayers.ScratchSpace(ctx, t, vm, "", "", cache) + spec := testoci.CreateLinuxSpec(ctx, t, cID, + testoci.DefaultLinuxSpecOpts(cID, + ctrdoci.WithProcessArgs(tt.lcowArgs...), + 
testoci.WithWindowsLayerFolders(append(ls, scratch)))...) + + c, _, cleanup := testcontainer.Create(ctx, t, vm, spec, cID, hcsOwner) + t.Cleanup(cleanup) + + io := testcmd.NewBufferedIO() + if tt.in != "" { + io = testcmd.NewBufferedIOFromString(tt.in) + } + init := testcontainer.Start(ctx, t, c, io) + + t.Cleanup(func() { + testcontainer.Kill(ctx, t, c) + testcontainer.Wait(ctx, t, c) + }) + + testcmd.WaitExitCode(ctx, t, init, 0) + io.TestOutput(t, tt.want, nil) + }) + } + }) // LCOW + + t.Run("WCOW Hyper-V", func(t *testing.T) { + requireFeatures(t, featureWCOW, featureUVM) + + ls := windowsImageLayers(ctx, t) + vm := testuvm.CreateAndStart(ctx, t, defaultWCOWOptions(ctx, t)) + + for _, tt := range ioTests { + if tt.wcowCmd == "" { + continue + } + + t.Run(tt.name, func(t *testing.T) { + cID := vm.ID() + "-container" + scratch := testlayers.WCOWScratchDir(ctx, t, "") + spec := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine(tt.wcowCmd), + testoci.WithWindowsLayerFolders(append(ls, scratch)), + )...) + + c, _, cleanup := testcontainer.Create(ctx, t, vm, spec, cID, hcsOwner) + t.Cleanup(cleanup) + + io := testcmd.NewBufferedIO() + if tt.in != "" { + io = testcmd.NewBufferedIOFromString(tt.in) + } + init := testcontainer.StartWithSpec(ctx, t, c, spec.Process, io) + + t.Cleanup(func() { + testcontainer.Kill(ctx, t, c) + testcontainer.Wait(ctx, t, c) + }) + + testcmd.WaitExitCode(ctx, t, init, 0) + io.TestOutput(t, tt.want, nil) + }) + } + }) // WCOW Hyper-V + + t.Run("WCOW Process", func(t *testing.T) { + requireFeatures(t, featureWCOW) + + ls := windowsImageLayers(ctx, t) + + for _, tt := range ioTests { + if tt.wcowCmd == "" { + continue + } + + t.Run(tt.name, func(t *testing.T) { + cID := testName(t, "container") + scratch := testlayers.WCOWScratchDir(ctx, t, "") + spec := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine(tt.wcowCmd), + testoci.WithWindowsLayerFolders(append(ls, scratch)), + )...) + + c, _, cleanup := testcontainer.Create(ctx, t, nil, spec, cID, hcsOwner) + t.Cleanup(cleanup) + + io := testcmd.NewBufferedIO() + if tt.in != "" { + io = testcmd.NewBufferedIOFromString(tt.in) + } + init := testcontainer.StartWithSpec(ctx, t, c, spec.Process, io) + t.Cleanup(func() { + testcontainer.Kill(ctx, t, c) + testcontainer.Wait(ctx, t, c) + }) + + testcmd.WaitExitCode(ctx, t, init, 0) + io.TestOutput(t, tt.want, nil) + }) + } + }) // WCOW Process + + t.Run("WCOW HostProcess", func(t *testing.T) { + requireFeatures(t, featureWCOW, featureHostProcess) + + ls := windowsImageLayers(ctx, t) + + for _, tt := range ioTests { + if tt.wcowCmd == "" { + continue + } + + t.Run(tt.name, func(t *testing.T) { + cID := testName(t, "container") + scratch := testlayers.WCOWScratchDir(ctx, t, "") + spec := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine(tt.wcowCmd), + testoci.WithWindowsLayerFolders(append(ls, scratch)), + testoci.AsHostProcessContainer(), + testoci.HostProcessInheritUser(), + )...) 
+ + c, _, cleanup := testcontainer.Create(ctx, t, nil, spec, cID, hcsOwner) + t.Cleanup(cleanup) + + io := testcmd.NewBufferedIO() + if tt.in != "" { + io = testcmd.NewBufferedIOFromString(tt.in) + } + init := testcontainer.StartWithSpec(ctx, t, c, spec.Process, io) + t.Cleanup(func() { + testcontainer.Kill(ctx, t, c) + testcontainer.Wait(ctx, t, c) + }) + + testcmd.WaitExitCode(ctx, t, init, 0) + io.TestOutput(t, tt.want, nil) + }) + } + }) // WCOW HostProcess +} + +func TestContainerExec(t *testing.T) { + requireFeatures(t, featureContainer) + requireAnyFeature(t, featureUVM, featureLCOW, featureWCOW, featureHostProcess) + require.Build(t, osversion.RS5) + + ctx := util.Context(namespacedContext(context.Background()), t) + + t.Run("LCOW", func(t *testing.T) { + requireFeatures(t, featureLCOW, featureUVM) + + opts := defaultLCOWOptions(ctx, t) + vm := testuvm.CreateAndStart(ctx, t, opts) + + ls := linuxImageLayers(ctx, t) + scratch, _ := testlayers.ScratchSpace(ctx, t, vm, "", "", "") + + cID := vm.ID() + "-container" + spec := testoci.CreateLinuxSpec(ctx, t, cID, + testoci.DefaultLinuxSpecOpts(cID, + ctrdoci.WithProcessArgs("/bin/sh", "-c", testoci.TailNullArgs), + testoci.WithWindowsLayerFolders(append(ls, scratch)))...) + + c, _, cleanup := testcontainer.Create(ctx, t, vm, spec, cID, hcsOwner) + t.Cleanup(cleanup) + init := testcontainer.Start(ctx, t, c, nil) + t.Cleanup(func() { + testcmd.Kill(ctx, t, init) + testcmd.Wait(ctx, t, init) + testcontainer.Kill(ctx, t, c) + testcontainer.Wait(ctx, t, c) + }) + + for _, tt := range ioTests { + if len(tt.lcowArgs) == 0 { + continue + } + + t.Run(tt.name, func(t *testing.T) { + ps := testoci.CreateLinuxSpec(ctx, t, cID, + testoci.DefaultLinuxSpecOpts(cID, + ctrdoci.WithDefaultPathEnv, + ctrdoci.WithProcessArgs(tt.lcowArgs...))..., + ).Process + io := testcmd.NewBufferedIO() + if tt.in != "" { + io = testcmd.NewBufferedIOFromString(tt.in) + } + p := testcmd.Create(ctx, t, c, ps, io) + testcmd.Start(ctx, t, p) + + testcmd.WaitExitCode(ctx, t, p, 0) + io.TestOutput(t, tt.want, nil) + }) + } + }) // LCOW + + t.Run("WCOW Hyper-V", func(t *testing.T) { + requireFeatures(t, featureWCOW, featureUVM) + + ls := windowsImageLayers(ctx, t) + vm := testuvm.CreateAndStart(ctx, t, defaultWCOWOptions(ctx, t)) + + cID := vm.ID() + "-container" + scratch := testlayers.WCOWScratchDir(ctx, t, "") + spec := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine(testoci.PingSelfCmd), + testoci.WithWindowsLayerFolders(append(ls, scratch)), + )...) 
+ + c, _, cleanup := testcontainer.Create(ctx, t, vm, spec, cID, hcsOwner) + t.Cleanup(cleanup) + init := testcontainer.StartWithSpec(ctx, t, c, spec.Process, nil) + t.Cleanup(func() { + testcmd.Kill(ctx, t, init) + testcmd.Wait(ctx, t, init) + testcontainer.Kill(ctx, t, c) + testcontainer.Wait(ctx, t, c) + }) + + for _, tt := range ioTests { + if tt.wcowCmd == "" { + continue + } + + t.Run(tt.name, func(t *testing.T) { + ps := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine(tt.wcowCmd), + )...).Process + + io := testcmd.NewBufferedIO() + if tt.in != "" { + io = testcmd.NewBufferedIOFromString(tt.in) + } + p := testcmd.Create(ctx, t, c, ps, io) + testcmd.Start(ctx, t, p) + + testcmd.WaitExitCode(ctx, t, p, 0) + io.TestOutput(t, tt.want, nil) + }) + } + }) // WCOW Hyper-V + + t.Run("WCOW Process", func(t *testing.T) { + requireFeatures(t, featureWCOW) + + ls := windowsImageLayers(ctx, t) + + cID := testName(t, "container") + scratch := testlayers.WCOWScratchDir(ctx, t, "") + spec := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine(testoci.PingSelfCmd), + testoci.WithWindowsLayerFolders(append(ls, scratch)), + )...) + + c, _, cleanup := testcontainer.Create(ctx, t, nil, spec, cID, hcsOwner) + t.Cleanup(cleanup) + init := testcontainer.StartWithSpec(ctx, t, c, spec.Process, nil) + t.Cleanup(func() { + testcmd.Kill(ctx, t, init) + testcmd.Wait(ctx, t, init) + testcontainer.Kill(ctx, t, c) + testcontainer.Wait(ctx, t, c) + }) + + for _, tt := range ioTests { + if tt.wcowCmd == "" { + continue + } + + t.Run(tt.name, func(t *testing.T) { + ps := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine(tt.wcowCmd), + )...).Process + + io := testcmd.NewBufferedIO() + if tt.in != "" { + io = testcmd.NewBufferedIOFromString(tt.in) + } + p := testcmd.Create(ctx, t, c, ps, io) + testcmd.Start(ctx, t, p) + + testcmd.WaitExitCode(ctx, t, p, 0) + io.TestOutput(t, tt.want, nil) + }) + } + }) // WCOW Process + + t.Run("WCOW HostProcess", func(t *testing.T) { + requireFeatures(t, featureWCOW, featureHostProcess) + + ls := windowsImageLayers(ctx, t) + + cID := testName(t, "container") + scratch := testlayers.WCOWScratchDir(ctx, t, "") + spec := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine(testoci.PingSelfCmd), + testoci.WithWindowsLayerFolders(append(ls, scratch)), + testoci.AsHostProcessContainer(), + testoci.HostProcessInheritUser(), + )...) 
+ + c, _, cleanup := testcontainer.Create(ctx, t, nil, spec, cID, hcsOwner) + t.Cleanup(cleanup) + init := testcontainer.StartWithSpec(ctx, t, c, spec.Process, nil) + t.Cleanup(func() { + testcmd.Kill(ctx, t, init) + testcmd.Wait(ctx, t, init) + testcontainer.Kill(ctx, t, c) + testcontainer.Wait(ctx, t, c) + }) + + for _, tt := range ioTests { + if tt.wcowCmd == "" { + continue + } + + t.Run(tt.name, func(t *testing.T) { + ps := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine(tt.wcowCmd), + )...).Process + + io := testcmd.NewBufferedIO() + if tt.in != "" { + io = testcmd.NewBufferedIOFromString(tt.in) + } + p := testcmd.Create(ctx, t, c, ps, io) + testcmd.Start(ctx, t, p) + + testcmd.WaitExitCode(ctx, t, p, 0) + io.TestOutput(t, tt.want, nil) + }) + } + }) // WCOW HostProcess +} + +func TestContainerExec_DoubleQuotes(t *testing.T) { + requireFeatures(t, featureContainer, featureWCOW) + requireAnyFeature(t, featureUVM, featureHostProcess) + require.Build(t, osversion.RS5) + + ctx := util.Context(namespacedContext(context.Background()), t) + + dir := `C:\hcsshim test temp dir with spaces` + acl := "CREATOR OWNER:(OI)(CI)(IO)(F)" + cmdLine := fmt.Sprintf(`cmd /C mkdir "%s" && icacls "%s" /grant "%s" /T && icacls "%s"`, dir, dir, acl, dir) + t.Logf("command line:\n%s", cmdLine) + + t.Run("WCOW Hyper-V", func(t *testing.T) { + requireFeatures(t, featureWCOW, featureUVM) + + ls := windowsImageLayers(ctx, t) + vm := testuvm.CreateAndStart(ctx, t, defaultWCOWOptions(ctx, t)) + + cID := vm.ID() + "-container" + scratch := testlayers.WCOWScratchDir(ctx, t, "") + spec := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine(testoci.PingSelfCmd), + testoci.WithWindowsLayerFolders(append(ls, scratch)), + )...) + + c, _, cleanup := testcontainer.Create(ctx, t, vm, spec, cID, hcsOwner) + t.Cleanup(cleanup) + init := testcontainer.StartWithSpec(ctx, t, c, spec.Process, nil) + t.Cleanup(func() { + testcmd.Kill(ctx, t, init) + testcmd.Wait(ctx, t, init) + testcontainer.Kill(ctx, t, c) + testcontainer.Wait(ctx, t, c) + }) + + ps := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine(cmdLine), + )...).Process + + io := testcmd.NewBufferedIO() + p := testcmd.Create(ctx, t, c, ps, io) + testcmd.Start(ctx, t, p) + + testcmd.WaitExitCode(ctx, t, p, 0) + io.TestStdOutContains(t, []string{acl}, nil) + }) // WCOW Hyper-V + + t.Run("WCOW Process", func(t *testing.T) { + requireFeatures(t, featureWCOW) + + ls := windowsImageLayers(ctx, t) + + cID := testName(t, "container") + scratch := testlayers.WCOWScratchDir(ctx, t, "") + spec := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine(testoci.PingSelfCmd), + testoci.WithWindowsLayerFolders(append(ls, scratch)), + )...) 
+ + c, _, cleanup := testcontainer.Create(ctx, t, nil, spec, cID, hcsOwner) + t.Cleanup(cleanup) + init := testcontainer.StartWithSpec(ctx, t, c, spec.Process, nil) + t.Cleanup(func() { + testcmd.Kill(ctx, t, init) + testcmd.Wait(ctx, t, init) + testcontainer.Kill(ctx, t, c) + testcontainer.Wait(ctx, t, c) + }) + + ps := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine(cmdLine), + )...).Process + + io := testcmd.NewBufferedIO() + p := testcmd.Create(ctx, t, c, ps, io) + testcmd.Start(ctx, t, p) + + testcmd.WaitExitCode(ctx, t, p, 0) + io.TestStdOutContains(t, []string{acl}, nil) + }) // WCOW Process + + t.Run("WCOW HostProcess", func(t *testing.T) { + requireFeatures(t, featureWCOW, featureHostProcess) + + ls := windowsImageLayers(ctx, t) + + // the directory will be created on the host from inside the HPC, so remove it + // this is mostly to avoid test failures, since `mkdir` errors if the directory already exists + t.Cleanup(func() { _ = util.RemoveAll(dir) }) + + cID := testName(t, "container") + scratch := testlayers.WCOWScratchDir(ctx, t, "") + spec := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine(testoci.PingSelfCmd), + testoci.WithWindowsLayerFolders(append(ls, scratch)), + testoci.AsHostProcessContainer(), + testoci.HostProcessInheritUser(), + )...) + + c, _, cleanup := testcontainer.Create(ctx, t, nil, spec, cID, hcsOwner) + t.Cleanup(cleanup) + init := testcontainer.StartWithSpec(ctx, t, c, spec.Process, nil) + t.Cleanup(func() { + testcmd.Kill(ctx, t, init) + testcmd.Wait(ctx, t, init) + testcontainer.Kill(ctx, t, c) + testcontainer.Wait(ctx, t, c) + }) + + ps := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine(cmdLine), + )...).Process + + io := testcmd.NewBufferedIO() + p := testcmd.Create(ctx, t, c, ps, io) + testcmd.Start(ctx, t, p) + + testcmd.WaitExitCode(ctx, t, p, 0) + io.TestStdOutContains(t, []string{acl}, nil) + }) // WCOW HostProcess +} diff --git a/test/functional/hostprocess_test.go b/test/functional/hostprocess_test.go new file mode 100644 index 0000000000..5b43847dd0 --- /dev/null +++ b/test/functional/hostprocess_test.go @@ -0,0 +1,400 @@ +//go:build windows && functional +// +build windows,functional + +package functional + +import ( + "context" + "fmt" + "os" + "os/exec" + "os/user" + "path/filepath" + "strings" + "testing" + + ctrdoci "github.com/containerd/containerd/oci" + "github.com/opencontainers/runtime-spec/specs-go" + + "github.com/Microsoft/hcsshim/internal/jobcontainers" + "github.com/Microsoft/hcsshim/internal/sync" + "github.com/Microsoft/hcsshim/internal/winapi" + "github.com/Microsoft/hcsshim/osversion" + + testcmd "github.com/Microsoft/hcsshim/test/internal/cmd" + testcontainer "github.com/Microsoft/hcsshim/test/internal/container" + testlayers "github.com/Microsoft/hcsshim/test/internal/layers" + testoci "github.com/Microsoft/hcsshim/test/internal/oci" + "github.com/Microsoft/hcsshim/test/internal/util" + "github.com/Microsoft/hcsshim/test/pkg/require" +) + +// TODO: +// - Environment +// - working directory +// - "microsoft.com/hostprocess-rootfs-location" and check that rootfs location exists +// - bind suppport? 
+ +const ( + system = `NT AUTHORITY\System` + localService = `NT AUTHORITY\Local Service` +) + +func TestHostProcess_whoami(t *testing.T) { + requireFeatures(t, featureContainer, featureWCOW, featureHostProcess) + require.Build(t, osversion.RS5) + + ctx := util.Context(namespacedContext(context.Background()), t) + ls := windowsImageLayers(ctx, t) + + username := getCurrentUsername(ctx, t) + t.Logf("current username: %s", username) + + // theres probably a better way to test for this *shrug* + isSystem := strings.EqualFold(username, system) + + for _, tt := range []struct { + name string + user ctrdoci.SpecOpts + whoiam string + }{ + // Logging in as the current user may require a password. + // Theres noo guarantee that Administrator, DefaultAccount, or Guest are enabled, so + // we cannot use them. + // Best bet is to login into a service user account, which is only possible if we are already + // running from `NT AUTHORITY\System`. + { + name: "username", + user: ctrdoci.WithUser(system), + whoiam: system, + }, + { + name: "username", + user: ctrdoci.WithUser(localService), + whoiam: localService, + }, + { + name: "inherit", + user: testoci.HostProcessInheritUser(), + whoiam: username, + }, + } { + t.Run(tt.name+" "+tt.whoiam, func(t *testing.T) { + if strings.HasPrefix(strings.ToLower(tt.whoiam), `nt authority\`) && !isSystem { + t.Skipf("starting HostProcess with account %q as requires running tests as %q", tt.whoiam, system) + } + + cID := testName(t, "container") + scratch := testlayers.WCOWScratchDir(ctx, t, "") + spec := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine("whoami.exe"), + testoci.WithWindowsLayerFolders(append(ls, scratch)), + testoci.AsHostProcessContainer(), + tt.user, + )...) + + c, _, cleanup := testcontainer.Create(ctx, t, nil, spec, cID, hcsOwner) + t.Cleanup(cleanup) + + io := testcmd.NewBufferedIO() + init := testcontainer.StartWithSpec(ctx, t, c, spec.Process, io) + t.Cleanup(func() { + testcontainer.Kill(ctx, t, c) + testcontainer.Wait(ctx, t, c) + }) + + testcmd.WaitExitCode(ctx, t, init, 0) + + io.TestOutput(t, tt.whoiam, nil) + }) + } + + t.Run("newgroup", func(t *testing.T) { + // CreateProcessAsUser needs SE_INCREASE_QUOTA_NAME and SE_ASSIGNPRIMARYTOKEN_NAME + // privileges, which we is not guaranteed for Administrators to have. + // So, if not System or LocalService, skip. + // + // https://learn.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-createprocessasuserw + if !isSystem { + t.Skipf("starting HostProcess within a new localgroup requires running tests as %q", system) + } + + cID := testName(t, "container") + + groupName := testName(t) + newLocalGroup(ctx, t, groupName) + + scratch := testlayers.WCOWScratchDir(ctx, t, "") + spec := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine("whoami.exe"), + testoci.WithWindowsLayerFolders(append(ls, scratch)), + testoci.AsHostProcessContainer(), + ctrdoci.WithUser(groupName), + )...) 
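+		// With ctrdoci.WithUser(groupName), the job container is expected to create a local user
+		// (the container ID truncated to winapi.UserNameCharLimit) and add it to that group; the
+		// assertions below check both the whoami output and the group membership.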
+ + c, _, cleanup := testcontainer.Create(ctx, t, nil, spec, cID, hcsOwner) + t.Cleanup(cleanup) + + io := testcmd.NewBufferedIO() + init := testcontainer.StartWithSpec(ctx, t, c, spec.Process, io) + t.Cleanup(func() { + testcontainer.Kill(ctx, t, c) + testcontainer.Wait(ctx, t, c) + }) + + testcmd.WaitExitCode(ctx, t, init, 0) + + hostname := getHostname(ctx, t) + expectedUser := cID[:winapi.UserNameCharLimit] + // whoami returns domain\username + io.TestOutput(t, hostname+`\`+expectedUser, nil) + + checkLocalGroupMember(ctx, t, groupName, expectedUser) + }) +} + +func TestHostProcess_hostname(t *testing.T) { + requireFeatures(t, featureContainer, featureWCOW, featureHostProcess) + require.Build(t, osversion.RS5) + + ctx := util.Context(namespacedContext(context.Background()), t) + ls := windowsImageLayers(ctx, t) + + hostname := getHostname(ctx, t) + t.Logf("current hostname: %s", hostname) + + cID := testName(t, "container") + + scratch := testlayers.WCOWScratchDir(ctx, t, "") + spec := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine("hostname.exe"), + testoci.WithWindowsLayerFolders(append(ls, scratch)), + testoci.AsHostProcessContainer(), + testoci.HostProcessInheritUser(), + )...) + + c, _, cleanup := testcontainer.Create(ctx, t, nil, spec, cID, hcsOwner) + t.Cleanup(cleanup) + + io := testcmd.NewBufferedIO() + init := testcontainer.StartWithSpec(ctx, t, c, spec.Process, io) + t.Cleanup(func() { + testcontainer.Kill(ctx, t, c) + testcontainer.Wait(ctx, t, c) + }) + + testcmd.WaitExitCode(ctx, t, init, 0) + + io.TestOutput(t, hostname, nil) +} + +// validate if we see the same volumes on the host as in the container. +func TestHostProcess_mountvol(t *testing.T) { + requireFeatures(t, featureContainer, featureWCOW, featureHostProcess) + require.Build(t, osversion.RS5) + + ctx := util.Context(namespacedContext(context.Background()), t) + ls := windowsImageLayers(ctx, t) + + cID := testName(t, "container") + + scratch := testlayers.WCOWScratchDir(ctx, t, "") + spec := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine("mountvol.exe"), + testoci.WithWindowsLayerFolders(append(ls, scratch)), + testoci.AsHostProcessContainer(), + testoci.HostProcessInheritUser(), + )...) 
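+	// Host process containers run directly on the host, so mountvol.exe inside the container
+	// should list exactly the same volumes as the host-side mountvol.exe invocation further down.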
+ + c, _, cleanup := testcontainer.Create(ctx, t, nil, spec, cID, hcsOwner) + t.Cleanup(cleanup) + + io := testcmd.NewBufferedIO() + init := testcontainer.StartWithSpec(ctx, t, c, spec.Process, io) + t.Cleanup(func() { + testcontainer.Kill(ctx, t, c) + testcontainer.Wait(ctx, t, c) + }) + + testcmd.WaitExitCode(ctx, t, init, 0) + + // container has been launched as the containers scratch space is a new volume + volumes, err := exec.CommandContext(ctx, "mountvol.exe").Output() + t.Logf("host mountvol.exe output:\n%s", string(volumes)) + if err != nil { + t.Fatalf("failed to exec mountvol: %v", err) + } + + io.TestOutput(t, string(volumes), nil) +} + +func TestHostProcess_VolumeMount(t *testing.T) { + requireFeatures(t, featureContainer, featureWCOW, featureHostProcess) + require.Build(t, osversion.RS5) + + ctx := util.Context(namespacedContext(context.Background()), t) + ls := windowsImageLayers(ctx, t) + + dir := t.TempDir() + containerDir := `C:\hcsshim_test\path\in\container` + + tmpfileName := "tmpfile" + containerTmpfile := filepath.Join(containerDir, tmpfileName) + + tmpfile := filepath.Join(dir, tmpfileName) + if err := os.WriteFile(tmpfile, []byte("test"), 0600); err != nil { + t.Fatalf("could not create temp file: %v", err) + } + + for _, tt := range []struct { + name string + hostPath string + containerPath string + cmd string + needsBindFilter bool + }{ + // CRI is responsible for adding `C:` to the start, and converting `/` to `\`, + // so here we make everything how Windows wants it + { + name: "dir absolute", + hostPath: dir, + containerPath: containerDir, + cmd: fmt.Sprintf(`dir.exe %s`, containerDir), + needsBindFilter: true, + }, + { + name: "dir relative", + hostPath: dir, + containerPath: containerDir, + cmd: fmt.Sprintf(`dir.exe %s`, strings.ReplaceAll(containerDir, `C:`, `%CONTAINER_SANDBOX_MOUNT_POINT%`)), + }, + { + name: "file absolute", + hostPath: tmpfile, + containerPath: containerTmpfile, + cmd: fmt.Sprintf(`cmd.exe /c type %s`, containerTmpfile), + needsBindFilter: true, + }, + { + name: "file relative", + hostPath: tmpfile, + containerPath: containerTmpfile, + cmd: fmt.Sprintf(`cmd.exe /c type %s`, strings.ReplaceAll(containerTmpfile, `C:`, `%CONTAINER_SANDBOX_MOUNT_POINT%`)), + }, + } { + t.Run(tt.name, func(t *testing.T) { + if tt.needsBindFilter && !jobcontainers.FileBindingSupported() { + t.Skip("bind filter support is required") + } + + // hpc mount will create the directory on the host, so remove it after test + t.Cleanup(func() { _ = util.RemoveAll(containerDir) }) + + cID := testName(t, "container") + + scratch := testlayers.WCOWScratchDir(ctx, t, "") + spec := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine(tt.cmd), + ctrdoci.WithMounts([]specs.Mount{ + { + Source: tt.hostPath, + Destination: tt.containerPath, + }, + }), + testoci.WithWindowsLayerFolders(append(ls, scratch)), + testoci.AsHostProcessContainer(), + testoci.HostProcessInheritUser(), + )...) 
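+			// The absolute-path cases rely on bindflt projecting the host path into the silo (hence
+			// the needsBindFilter skip above); the relative cases resolve the path under
+			// %CONTAINER_SANDBOX_MOUNT_POINT% and work without the bind filter.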
+ + c, _, cleanup := testcontainer.Create(ctx, t, nil, spec, cID, hcsOwner) + t.Cleanup(cleanup) + + io := testcmd.NewBufferedIO() // dir.exe and type.exe will error if theres stdout/err to write to + init := testcontainer.StartWithSpec(ctx, t, c, spec.Process, io) + t.Cleanup(func() { + testcontainer.Kill(ctx, t, c) + testcontainer.Wait(ctx, t, c) + }) + + if ee := testcmd.Wait(ctx, t, init); ee != 0 { + out, err := io.Output() + if out != "" { + t.Logf("stdout:\n%s", out) + } + if err != nil { + t.Logf("stderr:\n%v", err) + } + t.Errorf("got exit code %d, wanted %d", ee, 0) + } + }) + } +} + +func newLocalGroup(ctx context.Context, tb testing.TB, name string) { + tb.Helper() + + c := exec.CommandContext(ctx, "net", "localgroup", name, "/add") + if output, err := c.CombinedOutput(); err != nil { + tb.Logf("command %q output:\n%s", c.String(), strings.TrimSpace(string(output))) + tb.Fatalf("failed to create localgroup %q with: %v", name, err) + } + tb.Logf("created localgroup: %s", name) + + tb.Cleanup(func() { + deleteLocalGroup(ctx, tb, name) + }) +} + +func deleteLocalGroup(ctx context.Context, tb testing.TB, name string) { + tb.Helper() + + c := exec.CommandContext(ctx, "net", "localgroup", name, "/delete") + if output, err := c.CombinedOutput(); err != nil { + tb.Logf("command %q output:\n%s", c.String(), strings.TrimSpace(string(output))) + tb.Fatalf("failed to delete localgroup %q: %v", name, err) + } + tb.Logf("deleted localgroup: %s", name) +} + +// Checks if userName is present in the group `groupName`. +func checkLocalGroupMember(ctx context.Context, tb testing.TB, groupName, userName string) { + tb.Helper() + + c := exec.CommandContext(ctx, "net", "localgroup", groupName) + b, err := c.CombinedOutput() + output := strings.TrimSpace(string(b)) + tb.Logf("command %q output:\n%s", c.String(), output) + if err != nil { + tb.Fatalf("failed to check members for localgroup %q: %v", groupName, err) + } + if !strings.Contains(strings.ToLower(output), strings.ToLower(userName)) { + tb.Fatalf("user %s not present in the local group %s", userName, groupName) + } +} + +func getCurrentUsername(_ context.Context, tb testing.TB) string { + tb.Helper() + + u, err := user.Current() // cached, so no need to save on lookup + if err != nil { + tb.Fatalf("could not lookup current user: %v", err) + } + return u.Username +} + +var hostnameOnce = sync.OnceValue(os.Hostname) + +func getHostname(_ context.Context, tb testing.TB) string { + tb.Helper() + + n, err := hostnameOnce() + if err != nil { + tb.Fatalf("could not get hostname: %v", err) + } + return n +} diff --git a/test/functional/lcow_bench_test.go b/test/functional/lcow_bench_test.go deleted file mode 100644 index 752f002acd..0000000000 --- a/test/functional/lcow_bench_test.go +++ /dev/null @@ -1,106 +0,0 @@ -//go:build windows && functional -// +build windows,functional - -package functional - -import ( - "context" - "testing" - - "github.com/Microsoft/hcsshim/osversion" - - "github.com/Microsoft/hcsshim/test/internal/util" - "github.com/Microsoft/hcsshim/test/pkg/require" - testuvm "github.com/Microsoft/hcsshim/test/pkg/uvm" -) - -func BenchmarkLCOW_UVM(b *testing.B) { - requireFeatures(b, featureLCOW, featureUVM) - require.Build(b, osversion.RS5) - - pCtx := util.Context(context.Background(), b) - - b.Run("Create", func(b *testing.B) { - b.StopTimer() - b.ResetTimer() - for i := 0; i < b.N; i++ { - ctx, cancel := context.WithTimeout(pCtx, benchmarkIterationTimeout) - - opts := defaultLCOWOptions(ctx, b) - opts.ID += util.RandNameSuffix(i) - 
- b.StartTimer() - _, cleanup := testuvm.CreateLCOW(ctx, b, opts) - b.StopTimer() - - cleanup(ctx) - cancel() - } - }) - - b.Run("Start", func(b *testing.B) { - b.StopTimer() - b.ResetTimer() - for i := 0; i < b.N; i++ { - ctx, cancel := context.WithTimeout(pCtx, benchmarkIterationTimeout) - - opts := defaultLCOWOptions(ctx, b) - opts.ID += util.RandNameSuffix(i) - vm, cleanup := testuvm.CreateLCOW(ctx, b, opts) - - b.StartTimer() - if err := vm.Start(ctx); err != nil { - b.Fatalf("could not start UVM: %v", err) - } - b.StopTimer() - - cleanup(ctx) - cancel() - } - }) - - b.Run("Kill", func(b *testing.B) { - b.StopTimer() - b.ResetTimer() - for i := 0; i < b.N; i++ { - ctx, cancel := context.WithTimeout(pCtx, benchmarkIterationTimeout) - - opts := defaultLCOWOptions(ctx, b) - opts.ID += util.RandNameSuffix(i) - vm, cleanup := testuvm.CreateLCOW(ctx, b, opts) - testuvm.Start(ctx, b, vm) - - b.StartTimer() - testuvm.Kill(ctx, b, vm) - if err := vm.WaitCtx(ctx); err != nil { - b.Fatalf("could not kill uvm %q: %v", vm.ID(), err) - } - b.StopTimer() - - cleanup(ctx) - cancel() - } - }) - - b.Run("Close", func(b *testing.B) { - b.StopTimer() - b.ResetTimer() - for i := 0; i < b.N; i++ { - ctx, cancel := context.WithTimeout(pCtx, benchmarkIterationTimeout) - - opts := defaultLCOWOptions(ctx, b) - opts.ID += util.RandNameSuffix(i) - vm, cleanup := testuvm.CreateLCOW(ctx, b, opts) - testuvm.Start(ctx, b, vm) - - b.StartTimer() - if err := vm.CloseCtx(ctx); err != nil { - b.Fatalf("could not kill uvm %q: %v", vm.ID(), err) - } - b.StopTimer() - - cleanup(ctx) - cancel() - } - }) -} diff --git a/test/functional/lcow_container_test.go b/test/functional/lcow_container_test.go deleted file mode 100644 index d5e94fde03..0000000000 --- a/test/functional/lcow_container_test.go +++ /dev/null @@ -1,169 +0,0 @@ -//go:build windows && functional -// +build windows,functional - -package functional - -import ( - "context" - "strings" - "testing" - - ctrdoci "github.com/containerd/containerd/oci" - - "github.com/Microsoft/hcsshim/osversion" - - testcmd "github.com/Microsoft/hcsshim/test/internal/cmd" - testcontainer "github.com/Microsoft/hcsshim/test/internal/container" - testlayers "github.com/Microsoft/hcsshim/test/internal/layers" - testoci "github.com/Microsoft/hcsshim/test/internal/oci" - "github.com/Microsoft/hcsshim/test/internal/util" - "github.com/Microsoft/hcsshim/test/pkg/require" - testuvm "github.com/Microsoft/hcsshim/test/pkg/uvm" -) - -func TestLCOW_ContainerLifecycle(t *testing.T) { - requireFeatures(t, featureLCOW, featureUVM, featureContainer) - require.Build(t, osversion.RS5) - - ctx := util.Context(namespacedContext(context.Background()), t) - ls := linuxImageLayers(ctx, t) - opts := defaultLCOWOptions(ctx, t) - opts.ID += util.RandNameSuffix() - vm := testuvm.CreateAndStartLCOWFromOpts(ctx, t, opts) - - scratch, _ := testlayers.ScratchSpace(ctx, t, vm, "", "", "") - - spec := testoci.CreateLinuxSpec(ctx, t, t.Name()+util.RandNameSuffix(), - testoci.DefaultLinuxSpecOpts("", - ctrdoci.WithProcessArgs("/bin/sh", "-c", testoci.TailNullArgs), - testoci.WithWindowsLayerFolders(append(ls, scratch)))...) 
- - c, _, cleanup := testcontainer.Create(ctx, t, vm, spec, t.Name(), hcsOwner) - t.Cleanup(cleanup) - - init := testcontainer.Start(ctx, t, c, nil) - t.Cleanup(func() { - testcontainer.Kill(ctx, t, c) - testcontainer.Wait(ctx, t, c) - }) - testcmd.Kill(ctx, t, init) - testcmd.WaitExitCode(ctx, t, init, testcmd.ForcedKilledExitCode) -} - -var ioTests = []struct { - name string - args []string - in string - want string -}{ - { - name: "true", - args: []string{"/bin/sh", "-c", "true"}, - want: "", - }, - { - name: "echo", - args: []string{"/bin/sh", "-c", `echo -n "hi y'all"`}, - want: "hi y'all", - }, - { - name: "tee", - args: []string{"/bin/sh", "-c", "tee"}, - in: "are you copying me?", - want: "are you copying me?", - }, -} - -func TestLCOW_ContainerIO(t *testing.T) { - requireFeatures(t, featureLCOW, featureUVM, featureContainer) - require.Build(t, osversion.RS5) - - ctx := util.Context(namespacedContext(context.Background()), t) - ls := linuxImageLayers(ctx, t) - opts := defaultLCOWOptions(ctx, t) - opts.ID += util.RandNameSuffix() - cache := testlayers.CacheFile(ctx, t, "") - vm := testuvm.CreateAndStartLCOWFromOpts(ctx, t, opts) - - for _, tt := range ioTests { - t.Run(tt.name, func(t *testing.T) { - id := strings.ReplaceAll(t.Name(), "/", "") + util.RandNameSuffix() - scratch, _ := testlayers.ScratchSpace(ctx, t, vm, "", "", cache) - spec := testoci.CreateLinuxSpec(ctx, t, id, - testoci.DefaultLinuxSpecOpts(id, - ctrdoci.WithProcessArgs(tt.args...), - testoci.WithWindowsLayerFolders(append(ls, scratch)))...) - - c, _, cleanup := testcontainer.Create(ctx, t, vm, spec, id, hcsOwner) - t.Cleanup(cleanup) - - io := testcmd.NewBufferedIO() - if tt.in != "" { - io = testcmd.NewBufferedIOFromString(tt.in) - } - init := testcontainer.Start(ctx, t, c, io) - - t.Cleanup(func() { - testcontainer.Kill(ctx, t, c) - testcontainer.Wait(ctx, t, c) - }) - - if e := testcmd.Wait(ctx, t, init); e != 0 { - t.Fatalf("got exit code %d, wanted %d", e, 0) - } - - io.TestOutput(t, tt.want, nil) - }) - } -} - -func TestLCOW_ContainerExec(t *testing.T) { - requireFeatures(t, featureLCOW, featureUVM, featureContainer) - require.Build(t, osversion.RS5) - - ctx := util.Context(namespacedContext(context.Background()), t) - ls := linuxImageLayers(ctx, t) - opts := defaultLCOWOptions(ctx, t) - opts.ID += util.RandNameSuffix() - vm := testuvm.CreateAndStartLCOWFromOpts(ctx, t, opts) - - id := strings.ReplaceAll(t.Name(), "/", "") + util.RandNameSuffix() - scratch, _ := testlayers.ScratchSpace(ctx, t, vm, "", "", "") - spec := testoci.CreateLinuxSpec(ctx, t, id, - testoci.DefaultLinuxSpecOpts(id, - ctrdoci.WithProcessArgs("/bin/sh", "-c", testoci.TailNullArgs), - testoci.WithWindowsLayerFolders(append(ls, scratch)))...) 
- - c, _, cleanup := testcontainer.Create(ctx, t, vm, spec, id, hcsOwner) - t.Cleanup(cleanup) - init := testcontainer.Start(ctx, t, c, nil) - t.Cleanup(func() { - testcmd.Kill(ctx, t, init) - testcmd.Wait(ctx, t, init) - testcontainer.Kill(ctx, t, c) - testcontainer.Wait(ctx, t, c) - }) - - for _, tt := range ioTests { - t.Run(tt.name, func(t *testing.T) { - ps := testoci.CreateLinuxSpec(ctx, t, id, - testoci.DefaultLinuxSpecOpts(id, - // oci.WithTTY, - ctrdoci.WithDefaultPathEnv, - ctrdoci.WithProcessArgs(tt.args...))..., - ).Process - io := testcmd.NewBufferedIO() - if tt.in != "" { - io = testcmd.NewBufferedIOFromString(tt.in) - } - p := testcmd.Create(ctx, t, c, ps, io) - testcmd.Start(ctx, t, p) - - if e := testcmd.Wait(ctx, t, p); e != 0 { - t.Fatalf("got exit code %d, wanted %d", e, 0) - } - - io.TestOutput(t, tt.want, nil) - }) - } -} diff --git a/test/functional/lcow_test.go b/test/functional/lcow_test.go deleted file mode 100644 index a634241b13..0000000000 --- a/test/functional/lcow_test.go +++ /dev/null @@ -1,304 +0,0 @@ -//go:build windows && functional -// +build windows,functional - -package functional - -import ( - "bytes" - "context" - "errors" - "fmt" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/opencontainers/runtime-spec/specs-go" - - "github.com/Microsoft/hcsshim/internal/cmd" - "github.com/Microsoft/hcsshim/internal/cow" - "github.com/Microsoft/hcsshim/internal/hcsoci" - "github.com/Microsoft/hcsshim/internal/lcow" - "github.com/Microsoft/hcsshim/internal/resources" - "github.com/Microsoft/hcsshim/internal/uvm" - "github.com/Microsoft/hcsshim/internal/uvm/scsi" - "github.com/Microsoft/hcsshim/osversion" - - testutilities "github.com/Microsoft/hcsshim/test/internal" - testcmd "github.com/Microsoft/hcsshim/test/internal/cmd" - "github.com/Microsoft/hcsshim/test/internal/util" - "github.com/Microsoft/hcsshim/test/pkg/require" - testuvm "github.com/Microsoft/hcsshim/test/pkg/uvm" -) - -// test if closing a waiting (but not starting) uVM succeeds. -func TestLCOW_UVMCreateClose(t *testing.T) { - requireFeatures(t, featureLCOW, featureUVM) - require.Build(t, osversion.RS5) - - ctx := util.Context(context.Background(), t) - vm, cleanup := testuvm.CreateLCOW(ctx, t, defaultLCOWOptions(ctx, t)) - - testuvm.Close(ctx, t, vm) - - // also run cleanup to make sure that works fine too - cleanup(ctx) -} - -// test if waiting after creating (but not starting) an LCOW uVM returns. -func TestLCOW_UVMCreateWait(t *testing.T) { - requireFeatures(t, featureLCOW, featureUVM) - require.Build(t, osversion.RS5) - - pCtx := util.Context(context.Background(), t) - vm, cleanup := testuvm.CreateLCOW(pCtx, t, defaultLCOWOptions(pCtx, t)) - t.Cleanup(func() { cleanup(pCtx) }) - - ctx, cancel := context.WithTimeout(pCtx, 3*time.Second) - t.Cleanup(cancel) - switch err := vm.WaitCtx(ctx); { - case err == nil: - t.Fatal("wait did not error") - case !errors.Is(err, context.DeadlineExceeded): - t.Fatalf("wait should have errored with '%v'; got '%v'", context.DeadlineExceeded, err) - } -} - -// TestLCOW_UVMNoSCSINoVPMemInitrd starts an LCOW utility VM without a SCSI controller and -// no VPMem device. Uses initrd. 
-func TestLCOW_UVMNoSCSINoVPMemInitrd(t *testing.T) { - requireFeatures(t, featureLCOW, featureUVM) - - ctx := util.Context(context.Background(), t) - opts := defaultLCOWOptions(ctx, t) - opts.SCSIControllerCount = 0 - opts.VPMemDeviceCount = 0 - opts.PreferredRootFSType = uvm.PreferredRootFSTypeInitRd - opts.RootFSFile = uvm.InitrdFile - opts.KernelDirect = false - opts.KernelFile = uvm.KernelFile - - testLCOWUVMNoSCSISingleVPMem(t, opts, fmt.Sprintf("Command line: initrd=/%s", opts.RootFSFile)) -} - -// TestLCOW_UVMNoSCSISingleVPMemVHD starts an LCOW utility VM without a SCSI controller and -// only a single VPMem device. Uses VPMEM VHD. -func TestLCOW_UVMNoSCSISingleVPMemVHD(t *testing.T) { - requireFeatures(t, featureLCOW, featureUVM) - - ctx := util.Context(context.Background(), t) - opts := defaultLCOWOptions(ctx, t) - opts.SCSIControllerCount = 0 - opts.VPMemDeviceCount = 1 - opts.PreferredRootFSType = uvm.PreferredRootFSTypeVHD - opts.RootFSFile = uvm.VhdFile - - testLCOWUVMNoSCSISingleVPMem(t, opts, `Command line: root=/dev/pmem0`, `init=/init`) -} - -func testLCOWUVMNoSCSISingleVPMem(t *testing.T, opts *uvm.OptionsLCOW, expected ...string) { - t.Helper() - require.Build(t, osversion.RS5) - requireFeatures(t, featureLCOW, featureUVM) - ctx := util.Context(context.Background(), t) - - lcowUVM := testuvm.CreateAndStartLCOWFromOpts(ctx, t, opts) - - io := testcmd.NewBufferedIO() - // c := cmd.Command(lcowUVM, "dmesg") - c := testcmd.Create(ctx, t, lcowUVM, &specs.Process{Args: []string{"dmesg"}}, io) - testcmd.Start(ctx, t, c) - testcmd.WaitExitCode(ctx, t, c, 0) - - out, err := io.Output() - - if err != nil { - t.Helper() - t.Fatalf("uvm exec failed with: %s", err) - } - - for _, s := range expected { - if !strings.Contains(out, s) { - t.Helper() - t.Fatalf("Expected dmesg output to have %q: %s", s, out) - } - } -} - -// TestLCOW_TimeUVMStartVHD starts/terminates a utility VM booting from VPMem- -// attached root filesystem a number of times. -func TestLCOW_TimeUVMStartVHD(t *testing.T) { - require.Build(t, osversion.RS5) - requireFeatures(t, featureLCOW, featureUVM) - - testLCOWTimeUVMStart(t, false, uvm.PreferredRootFSTypeVHD) -} - -// TestLCOWUVMStart_KernelDirect_VHD starts/terminates a utility VM booting from -// VPMem- attached root filesystem a number of times starting from the Linux -// Kernel directly and skipping EFI. -func TestLCOW_UVMStart_KernelDirect_VHD(t *testing.T) { - require.Build(t, 18286) - requireFeatures(t, featureLCOW, featureUVM) - - testLCOWTimeUVMStart(t, true, uvm.PreferredRootFSTypeVHD) -} - -// TestLCOWTimeUVMStartInitRD starts/terminates a utility VM booting from initrd- -// attached root file system a number of times. -func TestLCOW_TimeUVMStartInitRD(t *testing.T) { - require.Build(t, osversion.RS5) - requireFeatures(t, featureLCOW, featureUVM) - - testLCOWTimeUVMStart(t, false, uvm.PreferredRootFSTypeInitRd) -} - -// TestLCOWUVMStart_KernelDirect_InitRd starts/terminates a utility VM booting -// from initrd- attached root file system a number of times starting from the -// Linux Kernel directly and skipping EFI. 
-func TestLCOW_UVMStart_KernelDirect_InitRd(t *testing.T) { - require.Build(t, 18286) - requireFeatures(t, featureLCOW, featureUVM) - - testLCOWTimeUVMStart(t, true, uvm.PreferredRootFSTypeInitRd) -} - -func testLCOWTimeUVMStart(t *testing.T, kernelDirect bool, rfsType uvm.PreferredRootFSType) { - t.Helper() - requireFeatures(t, featureLCOW, featureUVM) - - ctx := util.Context(context.Background(), t) - for i := 0; i < 3; i++ { - opts := defaultLCOWOptions(ctx, t) - opts.KernelDirect = kernelDirect - if !kernelDirect { - // can only use the uncompressed kernel with direct boot - opts.KernelFile = uvm.KernelFile - } - opts.VPMemDeviceCount = 32 - opts.PreferredRootFSType = rfsType - switch opts.PreferredRootFSType { - case uvm.PreferredRootFSTypeInitRd: - opts.RootFSFile = uvm.InitrdFile - case uvm.PreferredRootFSTypeVHD: - opts.RootFSFile = uvm.VhdFile - } - - lcowUVM := testuvm.CreateAndStartLCOWFromOpts(context.Background(), t, opts) - testuvm.Close(ctx, t, lcowUVM) - } -} - -func TestLCOWSimplePodScenario(t *testing.T) { - t.Skip("Doesn't work quite yet") - - require.Build(t, osversion.RS5) - requireFeatures(t, featureLCOW, featureUVM, featureContainer) - - layers := linuxImageLayers(context.Background(), t) - - cacheDir := t.TempDir() - cacheFile := filepath.Join(cacheDir, "cache.vhdx") - - // This is what gets mounted for UVM scratch - uvmScratchDir := t.TempDir() - uvmScratchFile := filepath.Join(uvmScratchDir, "uvmscratch.vhdx") - - // Scratch for the first container - c1ScratchDir := t.TempDir() - c1ScratchFile := filepath.Join(c1ScratchDir, "sandbox.vhdx") - - // Scratch for the second container - c2ScratchDir := t.TempDir() - c2ScratchFile := filepath.Join(c2ScratchDir, "sandbox.vhdx") - - lcowUVM := testuvm.CreateAndStartLCOW(context.Background(), t, "uvm") - defer lcowUVM.Close() - - // Populate the cache and generate the scratch file - if err := lcow.CreateScratch(context.Background(), lcowUVM, uvmScratchFile, lcow.DefaultScratchSizeGB, cacheFile); err != nil { - t.Fatal(err) - } - - _, err := lcowUVM.SCSIManager.AddVirtualDisk(context.Background(), uvmScratchFile, false, lcowUVM.ID(), &scsi.MountConfig{}) - if err != nil { - t.Fatal(err) - } - - // Now create the first containers sandbox, populate a spec - if err := lcow.CreateScratch(context.Background(), lcowUVM, c1ScratchFile, lcow.DefaultScratchSizeGB, cacheFile); err != nil { - t.Fatal(err) - } - c1Spec := testutilities.GetDefaultLinuxSpec(t) - c1Folders := append(layers, c1ScratchDir) - c1Spec.Windows.LayerFolders = c1Folders - c1Spec.Process.Args = []string{"echo", "hello", "lcow", "container", "one"} - c1Opts := &hcsoci.CreateOptions{ - Spec: c1Spec, - HostingSystem: lcowUVM, - } - - // Now create the second containers sandbox, populate a spec - if err := lcow.CreateScratch(context.Background(), lcowUVM, c2ScratchFile, lcow.DefaultScratchSizeGB, cacheFile); err != nil { - t.Fatal(err) - } - c2Spec := testutilities.GetDefaultLinuxSpec(t) - c2Folders := append(layers, c2ScratchDir) - c2Spec.Windows.LayerFolders = c2Folders - c2Spec.Process.Args = []string{"echo", "hello", "lcow", "container", "two"} - c2Opts := &hcsoci.CreateOptions{ - Spec: c2Spec, - HostingSystem: lcowUVM, - } - - // Create the two containers - c1hcsSystem, c1Resources, err := hcsoci.CreateContainer(context.Background(), c1Opts) - if err != nil { - t.Fatal(err) - } - c2hcsSystem, c2Resources, err := hcsoci.CreateContainer(context.Background(), c2Opts) - if err != nil { - t.Fatal(err) - } - - // Start them. 
In the UVM, they'll be in the created state from runc's perspective after this.eg - /// # runc list - //ID PID STATUS BUNDLE CREATED OWNER - //3a724c2b-f389-5c71-0555-ebc6f5379b30 138 running /run/gcs/c/1 2018-06-04T21:23:39.1253911Z root - //7a8229a0-eb60-b515-55e7-d2dd63ffae75 158 created /run/gcs/c/2 2018-06-04T21:23:39.4249048Z root - if err := c1hcsSystem.Start(context.Background()); err != nil { - t.Fatal(err) - } - defer resources.ReleaseResources(context.Background(), c1Resources, lcowUVM, true) //nolint:errcheck - - if err := c2hcsSystem.Start(context.Background()); err != nil { - t.Fatal(err) - } - defer resources.ReleaseResources(context.Background(), c2Resources, lcowUVM, true) //nolint:errcheck - - // Start the init process in each container and grab it's stdout comparing to expected - runInitProcess(t, c1hcsSystem, "hello lcow container one") - runInitProcess(t, c2hcsSystem, "hello lcow container two") -} - -// Helper to run the init process in an LCOW container; verify it exits with exit -// code 0; verify stderr is empty; check output is as expected. -func runInitProcess(t *testing.T, s cow.Container, expected string) { - t.Helper() - var errB bytes.Buffer - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - cmd := &cmd.Cmd{ - Host: s, - Stderr: &errB, - Context: ctx, - } - outb, err := cmd.Output() - if err != nil { - t.Fatalf("stderr: %s", err) - } - out := string(outb) - if strings.TrimSpace(out) != expected { - t.Fatalf("got %q expecting %q", string(out), expected) - } -} diff --git a/test/functional/lcow_uvm_test.go b/test/functional/lcow_uvm_test.go new file mode 100644 index 0000000000..a024d3762f --- /dev/null +++ b/test/functional/lcow_uvm_test.go @@ -0,0 +1,271 @@ +//go:build windows && functional +// +build windows,functional + +package functional + +import ( + "context" + "fmt" + "testing" + + "github.com/opencontainers/runtime-spec/specs-go" + + "github.com/Microsoft/hcsshim/internal/uvm" + "github.com/Microsoft/hcsshim/osversion" + + testcmd "github.com/Microsoft/hcsshim/test/internal/cmd" + "github.com/Microsoft/hcsshim/test/internal/util" + "github.com/Microsoft/hcsshim/test/pkg/require" + testuvm "github.com/Microsoft/hcsshim/test/pkg/uvm" +) + +// TestLCOWUVM_KernelArgs starts an LCOW utility VM and validates the kernel args contain the expected parameters. +func TestLCOWUVM_KernelArgs(t *testing.T) { + require.Build(t, osversion.RS5) + requireFeatures(t, featureLCOW, featureUVM) + + // TODO: + // - opts.VPCIEnabled and `pci=off` + // - opts.ProcessDumpLocation and `-core-dump-location` + // - opts.ConsolePipe/opts.EnableGraphicsConsole and `console=` + + ctx := util.Context(context.Background(), t) + numCPU := int32(2) + + for _, tc := range []struct { + name string + optsFn func(*uvm.OptionsLCOW) + wantArgs []string + notWantArgs []string + wantDmesg []string + notWantDmesg []string + }{ + // + // initrd test cases + // + // Don't test initrd with SCSI or vPMEM, since boot won't use either and the settings + // won't appear in kernel args or dmesg. + // Kernel command line only contains `initrd=/initrd.img` if KernelDirect is disabled, which + // implies booting from a compressed kernel. 
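+		//
+		// For illustration only (the cases below assert substrings, not the exact string), the resulting
+		// /proc/cmdline looks roughly like:
+		//
+		//	initrd boot (compressed kernel):  initrd=/initrd.img 8250_core.nr_uarts=0 nr_cpus=2 panic=-1 quiet pci=off
+		//	VHD rootfs over vPMEM:            root=/dev/pmem0 rootwait init=/init 8250_core.nr_uarts=0 nr_cpus=2 panic=-1 quiet pci=off
+		//
+		// with root=/dev/sda replacing root=/dev/pmem0 when the rootfs VHD is attached over SCSI instead.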
+ + { + name: "initrd kernel", + optsFn: func(opts *uvm.OptionsLCOW) { + opts.SCSIControllerCount = 0 + opts.VPMemDeviceCount = 0 + + opts.PreferredRootFSType = uvm.PreferredRootFSTypeInitRd + opts.RootFSFile = uvm.InitrdFile + + opts.KernelDirect = false + opts.KernelFile = uvm.KernelFile + }, + wantArgs: []string{fmt.Sprintf(`initrd=/%s`, uvm.InitrdFile), + `8250_core.nr_uarts=0`, fmt.Sprintf(`nr_cpus=%d`, numCPU), `panic=-1`, `quiet`, `pci=off`}, + notWantArgs: []string{`root=`, `rootwait`, `init=`, `/dev/pmem`, `/dev/sda`, `console=`}, + wantDmesg: []string{`initrd`, `initramfs`}, + }, + { + name: "initrd vmlinux", + optsFn: func(opts *uvm.OptionsLCOW) { + opts.SCSIControllerCount = 0 + opts.VPMemDeviceCount = 0 + + opts.PreferredRootFSType = uvm.PreferredRootFSTypeInitRd + opts.RootFSFile = uvm.InitrdFile + + opts.KernelDirect = true + opts.KernelFile = uvm.UncompressedKernelFile + }, + wantArgs: []string{`8250_core.nr_uarts=0`, fmt.Sprintf(`nr_cpus=%d`, numCPU), `panic=-1`, `quiet`, `pci=off`}, + notWantArgs: []string{`root=`, `rootwait`, `init=`, `/dev/pmem`, `/dev/sda`, `console=`}, + wantDmesg: []string{`initrd`, `initramfs`}, + }, + + // + // VHD rootfs test cases + // + + { + name: "no SCSI single vPMEM VHD kernel", + optsFn: func(opts *uvm.OptionsLCOW) { + opts.SCSIControllerCount = 0 + opts.VPMemDeviceCount = 1 + + opts.PreferredRootFSType = uvm.PreferredRootFSTypeVHD + opts.RootFSFile = uvm.VhdFile + + opts.KernelDirect = false + opts.KernelFile = uvm.KernelFile + }, + wantArgs: []string{`root=/dev/pmem0`, `rootwait`, `init=/init`, + `8250_core.nr_uarts=0`, fmt.Sprintf(`nr_cpus=%d`, numCPU), `panic=-1`, `quiet`, `pci=off`}, + notWantArgs: []string{`initrd=`, `/dev/sda`, `console=`}, + notWantDmesg: []string{`initrd`, `initramfs`}, + }, + { + name: "SCSI no vPMEM VHD kernel", + optsFn: func(opts *uvm.OptionsLCOW) { + opts.SCSIControllerCount = 1 + opts.VPMemDeviceCount = 0 + + opts.PreferredRootFSType = uvm.PreferredRootFSTypeVHD + opts.RootFSFile = uvm.VhdFile + + opts.KernelDirect = false + opts.KernelFile = uvm.KernelFile + }, + wantArgs: []string{`root=/dev/sda`, `rootwait`, `init=/init`, + `8250_core.nr_uarts=0`, fmt.Sprintf(`nr_cpus=%d`, numCPU), `panic=-1`, `quiet`, `pci=off`}, + notWantArgs: []string{`initrd=`, `/dev/pmem`, `console=`}, + notWantDmesg: []string{`initrd`, `initramfs`}, + }, + { + name: "no SCSI single vPMEM VHD vmlinux", + optsFn: func(opts *uvm.OptionsLCOW) { + opts.SCSIControllerCount = 0 + opts.VPMemDeviceCount = 1 + + opts.PreferredRootFSType = uvm.PreferredRootFSTypeVHD + opts.RootFSFile = uvm.VhdFile + + opts.KernelDirect = true + opts.KernelFile = uvm.UncompressedKernelFile + }, + wantArgs: []string{`root=/dev/pmem0`, `rootwait`, `init=/init`, + `8250_core.nr_uarts=0`, fmt.Sprintf(`nr_cpus=%d`, numCPU), `panic=-1`, `quiet`, `pci=off`}, + notWantArgs: []string{`initrd=`, `/dev/sda`, `console=`}, + notWantDmesg: []string{`initrd`, `initramfs`}, + }, + { + name: "SCSI no vPMEM VHD vmlinux", + optsFn: func(opts *uvm.OptionsLCOW) { + opts.SCSIControllerCount = 1 + opts.VPMemDeviceCount = 0 + + opts.PreferredRootFSType = uvm.PreferredRootFSTypeVHD + opts.RootFSFile = uvm.VhdFile + + opts.KernelDirect = true + opts.KernelFile = uvm.UncompressedKernelFile + }, + wantArgs: []string{`root=/dev/sda`, `rootwait`, `init=/init`, + `8250_core.nr_uarts=0`, fmt.Sprintf(`nr_cpus=%d`, numCPU), `panic=-1`, `quiet`, `pci=off`}, + notWantArgs: []string{`initrd=`, `/dev/pmem`, `console=`}, + notWantDmesg: []string{`initrd`, `initramfs`}, + }, + } { + t.Run(tc.name, 
func(t *testing.T) {
+			opts := defaultLCOWOptions(ctx, t)
+			opts.ProcessorCount = numCPU
+			tc.optsFn(opts)
+
+			if opts.KernelDirect {
+				require.Build(t, 18286)
+			}
+
+			vm := testuvm.CreateAndStartLCOWFromOpts(ctx, t, opts)
+
+			// validate the kernel args were constructed as expected
+			ioArgs := testcmd.NewBufferedIO()
+			cmdArgs := testcmd.Create(ctx, t, vm, &specs.Process{Args: []string{"cat", "/proc/cmdline"}}, ioArgs)
+			testcmd.Start(ctx, t, cmdArgs)
+			testcmd.WaitExitCode(ctx, t, cmdArgs, 0)
+
+			ioArgs.TestStdOutContains(t, tc.wantArgs, tc.notWantArgs)
+
+			// some boot options (notably using initrd) need to be validated by looking at dmesg logs.
+			// dmesg will output the kernel command line as
+			//
+			//	[    0.000000] Command line: <...>
+			//
+			// but it's easier/safer to read the args directly from /proc/cmdline
+
+			ioDmesg := testcmd.NewBufferedIO()
+			cmdDmesg := testcmd.Create(ctx, t, vm, &specs.Process{Args: []string{"dmesg"}}, ioDmesg)
+			testcmd.Start(ctx, t, cmdDmesg)
+			testcmd.WaitExitCode(ctx, t, cmdDmesg, 0)
+
+			ioDmesg.TestStdOutContains(t, tc.wantDmesg, tc.notWantDmesg)
+		})
+	}
+}
+
+// TestLCOWUVM_Boot starts and terminates a utility VM multiple times using different boot options.
+func TestLCOWUVM_Boot(t *testing.T) {
+	require.Build(t, osversion.RS5)
+	requireFeatures(t, featureLCOW, featureUVM)
+
+	numIters := 3
+	ctx := util.Context(context.Background(), t)
+
+	for _, tc := range []struct {
+		name   string
+		optsFn func(*uvm.OptionsLCOW)
+	}{
+		{
+			name: "vPMEM no kernel direct initrd",
+			optsFn: func(opts *uvm.OptionsLCOW) {
+				opts.KernelDirect = false
+				opts.KernelFile = uvm.KernelFile
+
+				opts.RootFSFile = uvm.InitrdFile
+				opts.PreferredRootFSType = uvm.PreferredRootFSTypeInitRd
+
+				opts.VPMemDeviceCount = 32
+			},
+		},
+		{
+			name: "vPMEM kernel direct initrd",
+			optsFn: func(opts *uvm.OptionsLCOW) {
+				opts.KernelDirect = true
+				opts.KernelFile = uvm.UncompressedKernelFile
+
+				opts.RootFSFile = uvm.InitrdFile
+				opts.PreferredRootFSType = uvm.PreferredRootFSTypeInitRd
+
+				opts.VPMemDeviceCount = 32
+			},
+		},
+		{
+			name: "vPMEM no kernel direct VHD",
+			optsFn: func(opts *uvm.OptionsLCOW) {
+				opts.KernelDirect = false
+				opts.KernelFile = uvm.KernelFile
+
+				opts.RootFSFile = uvm.VhdFile
+				opts.PreferredRootFSType = uvm.PreferredRootFSTypeVHD
+
+				opts.VPMemDeviceCount = 32
+			},
+		},
+		{
+			name: "vPMEM kernel direct VHD",
+			optsFn: func(opts *uvm.OptionsLCOW) {
+				opts.KernelDirect = true
+				opts.KernelFile = uvm.UncompressedKernelFile
+
+				opts.PreferredRootFSType = uvm.PreferredRootFSTypeVHD
+				opts.RootFSFile = uvm.VhdFile
+
+				opts.VPMemDeviceCount = 32
+			},
+		},
+	} {
+		t.Run(tc.name, func(t *testing.T) {
+			for i := 0; i < numIters; i++ {
+				// create new options every time, in case they are modified during uVM creation
+				opts := defaultLCOWOptions(ctx, t)
+				tc.optsFn(opts)
+
+				// should probably short-circuit earlier, but this will skip all subsequent iterations, which works
+				if opts.KernelDirect {
+					require.Build(t, 18286)
+				}
+
+				vm := testuvm.CreateAndStartLCOWFromOpts(ctx, t, opts)
+				testuvm.Close(ctx, t, vm)
+			}
+		})
+	}
+}
diff --git a/test/functional/main_test.go b/test/functional/main_test.go
index b2f5f9d5cc..d51bd2930e 100644
--- a/test/functional/main_test.go
+++ b/test/functional/main_test.go
@@ -40,6 +40,8 @@ import (
 	testuvm "github.com/Microsoft/hcsshim/test/pkg/uvm"
 )
 
+// TODO: common cmd.Cmd tests on different hosts: start, exec, double start, exit code, etc.
+
 // owner field for uVMs.
const hcsOwner = "hcsshim-functional-tests" @@ -223,6 +225,8 @@ func runTests(m *testing.M) error { // interfering with benchmarking output if util.RunningBenchmarks() { util.PrintAdditionalBenchmarkConfig() + // also print out the features used as part of the benchmarking config + fmt.Printf("features: %s\n", flagFeatures.Strings()) provider, err := etw.NewProviderWithOptions("Microsoft.Virtualization.RunHCS") if err != nil { @@ -259,6 +263,11 @@ func requireFeatures(tb testing.TB, features ...string) { require.Features(tb, flagFeatures, features...) } +func requireAnyFeature(tb testing.TB, features ...string) { + tb.Helper() + require.AnyFeature(tb, flagFeatures, features...) +} + func defaultLCOWOptions(ctx context.Context, tb testing.TB) *uvm.OptionsLCOW { tb.Helper() diff --git a/test/functional/uvm_bench_test.go b/test/functional/uvm_bench_test.go new file mode 100644 index 0000000000..27d5129a4f --- /dev/null +++ b/test/functional/uvm_bench_test.go @@ -0,0 +1,123 @@ +//go:build windows && functional +// +build windows,functional + +package functional + +import ( + "context" + "testing" + + "github.com/Microsoft/hcsshim/osversion" + + "github.com/Microsoft/hcsshim/test/internal/util" + "github.com/Microsoft/hcsshim/test/pkg/require" + testuvm "github.com/Microsoft/hcsshim/test/pkg/uvm" +) + +func BenchmarkUVM(b *testing.B) { + requireFeatures(b, featureUVM) + requireAnyFeature(b, featureLCOW, featureWCOW) + require.Build(b, osversion.RS5) + + pCtx := util.Context(context.Background(), b) + + for _, tt := range []struct { + feature string + createOpts func(context.Context, testing.TB) any + }{ + { + feature: featureLCOW, + //nolint: thelper + createOpts: func(ctx context.Context, tb testing.TB) any { return defaultLCOWOptions(ctx, tb) }, + }, + { + feature: featureWCOW, + //nolint: thelper + createOpts: func(ctx context.Context, tb testing.TB) any { return defaultWCOWOptions(ctx, tb) }, + }, + } { + b.Run(tt.feature, func(b *testing.B) { + requireFeatures(b, tt.feature) + + b.Run("Create", func(b *testing.B) { + b.StopTimer() + b.ResetTimer() + for i := 0; i < b.N; i++ { + ctx, cancel := context.WithTimeout(pCtx, benchmarkIterationTimeout) + + opts := tt.createOpts(ctx, b) + + b.StartTimer() + _, cleanup := testuvm.Create(ctx, b, opts) + b.StopTimer() + + cleanup(ctx) + cancel() + } + }) + + b.Run("Start", func(b *testing.B) { + b.StopTimer() + b.ResetTimer() + for i := 0; i < b.N; i++ { + ctx, cancel := context.WithTimeout(pCtx, benchmarkIterationTimeout) + + opts := tt.createOpts(ctx, b) + vm, cleanup := testuvm.Create(ctx, b, opts) + + b.StartTimer() + if err := vm.Start(ctx); err != nil { + b.Fatalf("could not start UVM: %v", err) + } + b.StopTimer() + + cleanup(ctx) + cancel() + } + }) + + b.Run("Kill", func(b *testing.B) { + b.StopTimer() + b.ResetTimer() + for i := 0; i < b.N; i++ { + ctx, cancel := context.WithTimeout(pCtx, benchmarkIterationTimeout) + + opts := tt.createOpts(ctx, b) + vm, cleanup := testuvm.Create(ctx, b, opts) + testuvm.Start(ctx, b, vm) + + b.StartTimer() + testuvm.Kill(ctx, b, vm) + if err := vm.WaitCtx(ctx); err != nil { + b.Fatalf("could not kill uvm %q: %v", vm.ID(), err) + } + b.StopTimer() + + cleanup(ctx) + cancel() + } + }) + + b.Run("Close", func(b *testing.B) { + b.StopTimer() + b.ResetTimer() + for i := 0; i < b.N; i++ { + ctx, cancel := context.WithTimeout(pCtx, benchmarkIterationTimeout) + + opts := tt.createOpts(ctx, b) + vm, cleanup := testuvm.Create(ctx, b, opts) + testuvm.Start(ctx, b, vm) + + b.StartTimer() + if err := vm.CloseCtx(ctx); 
err != nil { + b.Fatalf("could not kill uvm %q: %v", vm.ID(), err) + } + b.StopTimer() + + cleanup(ctx) + cancel() + } + }) + }) + } +} diff --git a/test/functional/uvm_test.go b/test/functional/uvm_test.go new file mode 100644 index 0000000000..90b9713b3e --- /dev/null +++ b/test/functional/uvm_test.go @@ -0,0 +1,71 @@ +//go:build windows && functional +// +build windows,functional + +package functional + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/Microsoft/hcsshim/osversion" + + "github.com/Microsoft/hcsshim/test/internal/util" + "github.com/Microsoft/hcsshim/test/pkg/require" + testuvm "github.com/Microsoft/hcsshim/test/pkg/uvm" +) + +func TestUVM(t *testing.T) { + requireFeatures(t, featureUVM) + requireAnyFeature(t, featureLCOW, featureWCOW) + require.Build(t, osversion.RS5) + + ctx := util.Context(context.Background(), t) + + for _, tt := range []struct { + feature string + createOpts func(context.Context, testing.TB) any + }{ + { + feature: featureLCOW, + //nolint: thelper + createOpts: func(_ context.Context, tb testing.TB) any { return defaultLCOWOptions(ctx, tb) }, + }, + { + feature: featureWCOW, + //nolint: thelper + createOpts: func(ctx context.Context, tb testing.TB) any { return defaultWCOWOptions(ctx, tb) }, + }, + } { + t.Run(tt.feature, func(t *testing.T) { + requireFeatures(t, tt.feature) + + // test if closing a created (but not started) uVM succeeds. + t.Run("Close_Created", func(t *testing.T) { + vm, cleanup := testuvm.Create(ctx, t, tt.createOpts(ctx, t)) + + testuvm.Close(ctx, t, vm) + + // also run cleanup to make sure that works fine too + cleanup(ctx) + }) + + // test if waiting after creating (but not starting) a uVM times out. + t.Run("Wait_Created", func(t *testing.T) { + vm, cleanup := testuvm.Create(ctx, t, tt.createOpts(ctx, t)) + t.Cleanup(func() { cleanup(ctx) }) + + // arbitrary timeout + timeoutCtx, cancel := context.WithTimeout(ctx, 3*time.Second) + t.Cleanup(cancel) + switch err := vm.WaitCtx(timeoutCtx); { + case err == nil: + t.Fatal("wait did not error") + case !errors.Is(err, context.DeadlineExceeded): + t.Fatalf("wait should have errored with '%v'; got '%v'", context.DeadlineExceeded, err) + } + }) + }) + } +} diff --git a/test/functional/uvm_vsmb_test.go b/test/functional/uvm_vsmb_test.go index 1e4b2b152f..7914d666cd 100644 --- a/test/functional/uvm_vsmb_test.go +++ b/test/functional/uvm_vsmb_test.go @@ -6,10 +6,12 @@ package functional import ( "context" "errors" + "fmt" + "os" + "path/filepath" "testing" "github.com/Microsoft/hcsshim/internal/hcs" - "github.com/Microsoft/hcsshim/internal/uvm" "github.com/Microsoft/hcsshim/osversion" "github.com/Microsoft/hcsshim/test/internal/util" @@ -17,72 +19,153 @@ import ( testuvm "github.com/Microsoft/hcsshim/test/pkg/uvm" ) -// TestVSMB tests adding/removing VSMB layers from a v2 Windows utility VM. -func TestVSMB(t *testing.T) { - t.Skip("not yet updated") +// TODO: vSMB benchmarks +// TODO: re-add a removed directmapped vSMB share +// TODO: add vSMB to created-but-not-started (or closed) uVM +// TestVSMB_WCOW tests adding/removing VSMB layers from a v2 Windows utility VM. 
+func TestVSMB_WCOW(t *testing.T) { require.Build(t, osversion.RS5) requireFeatures(t, featureWCOW, featureUVM, featureVSMB) - ctx := util.Context(context.Background(), t) - //nolint:staticcheck // SA1019: deprecated; will be replaced when test is updated - uvm, _, _ := testuvm.CreateWCOWUVM(ctx, t, t.Name(), "microsoft/nanoserver") - defer uvm.Close() - - dir := t.TempDir() - var iterations uint32 = 64 - options := uvm.DefaultVSMBOptions(true) - options.TakeBackupPrivilege = true - for i := 0; i < int(iterations); i++ { - if _, err := uvm.AddVSMB(ctx, dir, options); err != nil { - t.Fatalf("AddVSMB failed: %s", err) - } - } + ctx := util.Context(namespacedContext(context.Background()), t) - // Remove them all - for i := 0; i < int(iterations); i++ { - if err := uvm.RemoveVSMB(ctx, dir, true); err != nil { - t.Fatalf("RemoveVSMB failed: %s", err) + type testCase struct { + name string + backupPriv bool + readOnly bool + noDirectMap bool + } + tests := make([]testCase, 0, 8) + for _, ro := range []bool{true, false} { + for _, backup := range []bool{true, false} { + for _, noDirectMap := range []bool{true, false} { + n := "RW" + if ro { + n = "RO" + } + if backup { + n += "-backup" + } + if noDirectMap { + n += "-noDirectMap" + } + + tests = append(tests, testCase{ + name: n, + backupPriv: backup, + readOnly: ro, + noDirectMap: noDirectMap, + }) + } } } -} - -// TODO: VSMB for mapped directories - -func TestVSMB_Writable(t *testing.T) { - t.Skip("not yet updated") - require.Build(t, osversion.RS5) - requireFeatures(t, featureWCOW, featureUVM, featureVSMB) - ctx := util.Context(context.Background(), t) - - opts := uvm.NewDefaultOptionsWCOW(t.Name(), "") - opts.NoWritableFileShares = true - //nolint:staticcheck // SA1019: deprecated; will be replaced when test is updated - vm, _, _ := testuvm.CreateWCOWUVMFromOptsWithImage(ctx, t, opts, "microsoft/nanoserver") - defer vm.Close() - - dir := t.TempDir() - options := vm.DefaultVSMBOptions(true) - options.TakeBackupPrivilege = true - options.ReadOnly = false - _, err := vm.AddVSMB(ctx, dir, options) - defer func() { - if err == nil { - return - } - if err = vm.RemoveVSMB(ctx, dir, true); err != nil { - t.Fatalf("RemoveVSMB failed: %s", err) + const iterations = 64 + for _, tt := range tests { + for _, newDir := range []bool{true, false} { + name := tt.name + if newDir { + name += "-newDir" + } + + t.Run("dir-"+name, func(t *testing.T) { + // create a temp directory before creating the uVM, so the uVM will be closed before + // temp dir's cleanup + dir := t.TempDir() + vm := testuvm.CreateAndStart(ctx, t, defaultWCOWOptions(ctx, t)) + + options := vm.DefaultVSMBOptions(tt.readOnly) + options.TakeBackupPrivilege = tt.backupPriv + options.NoDirectmap = tt.noDirectMap + t.Logf("vSMB options: %#+v", options) + + var path string + var err error + for i := 0; i < iterations; i++ { + if i == 0 || newDir { + // create a temp directory on the first iteration, or on each subsequent iteration if [testCase.newDir] + // don't need to remove it, since `dir` will be removed whole-sale during test cleanup + if path, err = os.MkdirTemp(dir, ""); err != nil { + t.Fatalf("MkdirTemp: %v", err) + } + } + + opts := *options // create a copy in case its (accidentally) modified + s := testuvm.AddVSMB(ctx, t, vm, path, &opts) + if path != s.HostPath { + t.Fatalf("expected vSMB path: %q; got %q", path, s.HostPath) + } + } + }) + + t.Run("file-"+name, func(t *testing.T) { + // create a temp directory before creating the uVM, so the uVM will be closed before + // temp dir's 
cleanup + dir := t.TempDir() + vm := testuvm.CreateAndStart(ctx, t, defaultWCOWOptions(ctx, t)) + + options := vm.DefaultVSMBOptions(tt.readOnly) + options.TakeBackupPrivilege = tt.backupPriv + options.NoDirectmap = tt.noDirectMap + t.Logf("vSMB options: %#+v", options) + + var path string + var err error + for i := 0; i < iterations; i++ { + if i == 0 || newDir { + // create a temp directory on the first iteration, or on each subsequent iteration if [testCase.newDir] + // don't need to remove it, since `dir` will be removed whole-sale during test cleanup + if path, err = os.MkdirTemp(dir, ""); err != nil { + t.Fatalf("MkdirTemp: %v", err) + } + } + f := filepath.Join(path, fmt.Sprintf("f%d.txt", i)) + if err := os.WriteFile(f, []byte(t.Name()), 0600); err != nil { + t.Fatal(err) + } + + opts := *options // create a copy in case its (accidentally) modified + s := testuvm.AddVSMB(ctx, t, vm, f, &opts) + if path != s.HostPath { + t.Fatalf("expected vSMB path: %q; got %q", path, s.HostPath) + } + } + }) } - }() - - if !errors.Is(err, hcs.ErrOperationDenied) { - t.Fatalf("AddVSMB should have failed with %v instead of: %v", hcs.ErrOperationDenied, err) } - options.ReadOnly = true - _, err = vm.AddVSMB(ctx, dir, options) - if err != nil { - t.Fatalf("AddVSMB failed: %s", err) - } + t.Run("NoWritableFileShares", func(t *testing.T) { + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // create a temp directory before creating the uVM, so the uVM will be closed before + // temp dir's cleanup + dir := t.TempDir() + + opts := defaultWCOWOptions(ctx, t) + opts.NoWritableFileShares = true + vm := testuvm.CreateAndStart(ctx, t, opts) + + options := vm.DefaultVSMBOptions(tt.readOnly) + options.TakeBackupPrivilege = tt.backupPriv + options.NoDirectmap = tt.noDirectMap + t.Logf("vSMB options: %#+v", options) + + s, err := vm.AddVSMB(ctx, dir, options) + + t.Cleanup(func() { + if err != nil { + return + } + if err = vm.RemoveVSMB(ctx, s.HostPath, tt.readOnly); err != nil { + t.Fatalf("failed to remove vSMB share: %v", err) + } + }) + + if !tt.readOnly && !errors.Is(err, hcs.ErrOperationDenied) { + t.Fatalf("AddVSMB should have failed with %v instead of: %v", hcs.ErrOperationDenied, err) + } + }) + } + }) } diff --git a/test/functional/wcow_container_bench_test.go b/test/functional/wcow_container_bench_test.go new file mode 100644 index 0000000000..ebddf86eb1 --- /dev/null +++ b/test/functional/wcow_container_bench_test.go @@ -0,0 +1,421 @@ +//go:build windows && functional +// +build windows,functional + +package functional + +import ( + "context" + "errors" + "path/filepath" + "strings" + "testing" + + ctrdoci "github.com/containerd/containerd/oci" + criutil "github.com/containerd/containerd/pkg/cri/util" + "golang.org/x/sys/windows" + + "github.com/Microsoft/hcsshim/internal/cmd" + "github.com/Microsoft/hcsshim/internal/hcsoci" + "github.com/Microsoft/hcsshim/internal/layers" + "github.com/Microsoft/hcsshim/internal/resources" + "github.com/Microsoft/hcsshim/internal/uvm" + "github.com/Microsoft/hcsshim/osversion" + + testcmd "github.com/Microsoft/hcsshim/test/internal/cmd" + testcontainer "github.com/Microsoft/hcsshim/test/internal/container" + testlayers "github.com/Microsoft/hcsshim/test/internal/layers" + testoci "github.com/Microsoft/hcsshim/test/internal/oci" + "github.com/Microsoft/hcsshim/test/internal/util" + "github.com/Microsoft/hcsshim/test/pkg/require" + testuvm "github.com/Microsoft/hcsshim/test/pkg/uvm" +) + +func BenchmarkWCOW_Container(b *testing.B) { + 
requireFeatures(b, featureWCOW) + requireAnyFeature(b, featureContainer, featureUVM, featureHostProcess) + require.Build(b, osversion.RS5) + + pCtx := util.Context(namespacedContext(context.Background()), b) + + for _, tc := range []struct { + name string + createUVM bool + extraOpts []ctrdoci.SpecOpts + killExitCode int + features []string + }{ + { + name: "Hyper-V", + createUVM: true, + killExitCode: int(windows.ERROR_PROCESS_ABORTED), + features: []string{featureUVM}, + }, + { + name: "Process", + killExitCode: int(windows.ERROR_PROCESS_ABORTED), + }, + { + name: "HostProcess", + extraOpts: []ctrdoci.SpecOpts{ + testoci.AsHostProcessContainer(), + testoci.HostProcessInheritUser(), + }, + // HostProcess containers prepend a `cmd /c` to the command, and killing that returns exit code 1 + killExitCode: 1, + features: []string{featureHostProcess}, + }, + } { + b.Run(tc.name, func(b *testing.B) { + requireFeatures(b, tc.features...) + + b.StopTimer() + b.ResetTimer() + + ls := windowsImageLayers(pCtx, b) + + b.Run("Create", func(b *testing.B) { + b.StopTimer() + b.ResetTimer() + + var vm *uvm.UtilityVM + if tc.createUVM { + vm = testuvm.CreateAndStart(pCtx, b, defaultWCOWOptions(pCtx, b)) + } + + for i := 0; i < b.N; i++ { + ctx, cancel := context.WithTimeout(pCtx, benchmarkIterationTimeout) + + id := criutil.GenerateID() + scratch := testlayers.WCOWScratchDir(ctx, b, "") + spec := testoci.CreateWindowsSpec(ctx, b, id, + testoci.DefaultWindowsSpecOpts(id, + append(tc.extraOpts, + ctrdoci.WithProcessCommandLine("cmd /c (exit 0)"), + testoci.WithWindowsLayerFolders(append(ls, scratch)), + )..., + )...) + + co := &hcsoci.CreateOptions{ + ID: id, + HostingSystem: vm, + Owner: hcsOwner, + Spec: spec, + // dont create a network namespace on the host side + NetworkNamespace: "", + } + + co.LCOWLayers = &layers.LCOWLayers{ + Layers: make([]*layers.LCOWLayer, 0, len(ls)), + ScratchVHDPath: filepath.Join(scratch, "sandbox.vhdx"), + } + + for _, p := range ls { + co.LCOWLayers.Layers = append(co.LCOWLayers.Layers, &layers.LCOWLayer{VHDPath: filepath.Join(p, "layer.vhd")}) + } + + b.StartTimer() + c, r, err := hcsoci.CreateContainer(ctx, co) + if err != nil { + b.Fatalf("could not create container %q: %v", co.ID, err) + } + b.StopTimer() + + // container creation launches go rountines on the guest that do + // not finish until the init process has terminated. + // so start the container, then clean everything up + init := testcontainer.StartWithSpec(ctx, b, c, spec.Process, nil) + testcmd.WaitExitCode(ctx, b, init, 0) + + testcontainer.Kill(ctx, b, c) + testcontainer.Wait(ctx, b, c) + if err := resources.ReleaseResources(ctx, r, vm, true); err != nil { + b.Errorf("failed to release container resources: %v", err) + } + if err := c.Close(); err != nil { + b.Errorf("could not close container %q: %v", c.ID(), err) + } + + cancel() + } + }) + + b.Run("Start", func(b *testing.B) { + b.StopTimer() + b.ResetTimer() + + var vm *uvm.UtilityVM + if tc.createUVM { + vm = testuvm.CreateAndStart(pCtx, b, defaultWCOWOptions(pCtx, b)) + } + + for i := 0; i < b.N; i++ { + ctx, cancel := context.WithTimeout(pCtx, benchmarkIterationTimeout) + + id := criutil.GenerateID() + scratch := testlayers.WCOWScratchDir(ctx, b, "") + spec := testoci.CreateWindowsSpec(ctx, b, id, + testoci.DefaultWindowsSpecOpts(id, + append(tc.extraOpts, + ctrdoci.WithProcessCommandLine("cmd /c (exit 0)"), + testoci.WithWindowsLayerFolders(append(ls, scratch)), + )..., + )...) 
+ + c, _, cleanup := testcontainer.Create(ctx, b, vm, spec, id, hcsOwner) + + b.StartTimer() + if err := c.Start(ctx); err != nil { + b.Fatalf("could not start %q: %v", c.ID(), err) + } + b.StopTimer() + + init := testcmd.Create(ctx, b, c, spec.Process, nil) + testcmd.Start(ctx, b, init) + testcmd.WaitExitCode(ctx, b, init, 0) + + testcontainer.Kill(ctx, b, c) + testcontainer.Wait(ctx, b, c) + cleanup() + cancel() + } + }) + + b.Run("InitExec", func(b *testing.B) { + b.StopTimer() + b.ResetTimer() + + var vm *uvm.UtilityVM + if tc.createUVM { + vm = testuvm.CreateAndStart(pCtx, b, defaultWCOWOptions(pCtx, b)) + } + + for i := 0; i < b.N; i++ { + ctx, cancel := context.WithTimeout(pCtx, benchmarkIterationTimeout) + + id := criutil.GenerateID() + scratch := testlayers.WCOWScratchDir(ctx, b, "") + spec := testoci.CreateWindowsSpec(ctx, b, id, + testoci.DefaultWindowsSpecOpts(id, + append(tc.extraOpts, + ctrdoci.WithProcessCommandLine(testoci.PingSelfCmd), + testoci.WithWindowsLayerFolders(append(ls, scratch)), + )..., + )...) + + c, _, cleanup := testcontainer.Create(ctx, b, vm, spec, id, hcsOwner) + if err := c.Start(ctx); err != nil { + b.Fatalf("could not start %q: %v", c.ID(), err) + } + init := testcmd.Create(ctx, b, c, spec.Process, nil) + + b.StartTimer() + if err := init.Start(); err != nil { + b.Fatalf("failed to start init command: %v", err) + } + b.StopTimer() + + testcmd.Kill(ctx, b, init) + testcmd.WaitExitCode(ctx, b, init, tc.killExitCode) + + testcontainer.Kill(ctx, b, c) + testcontainer.Wait(ctx, b, c) + cleanup() + cancel() + } + }) + + b.Run("InitExecKill", func(b *testing.B) { + b.StopTimer() + b.ResetTimer() + + var vm *uvm.UtilityVM + if tc.createUVM { + vm = testuvm.CreateAndStart(pCtx, b, defaultWCOWOptions(pCtx, b)) + } + + for i := 0; i < b.N; i++ { + ctx, cancel := context.WithTimeout(pCtx, benchmarkIterationTimeout) + + id := criutil.GenerateID() + scratch := testlayers.WCOWScratchDir(ctx, b, "") + spec := testoci.CreateWindowsSpec(ctx, b, id, + testoci.DefaultWindowsSpecOpts(id, + append(tc.extraOpts, + ctrdoci.WithProcessCommandLine(testoci.PingSelfCmd), + testoci.WithWindowsLayerFolders(append(ls, scratch)), + )..., + )...) + + c, _, cleanup := testcontainer.Create(ctx, b, vm, spec, id, hcsOwner) + init := testcontainer.StartWithSpec(ctx, b, c, spec.Process, nil) + + b.StartTimer() + if ok, err := init.Process.Kill(ctx); !ok { + b.Fatalf("could not deliver kill to init command") + } else if err != nil { + b.Fatalf("could not kill init command: %v", err) + } + + if err := init.Wait(); err != nil { + ee := &cmd.ExitError{} + if !errors.As(err, &ee) { + b.Fatalf("failed to wait on init command: %v", err) + } + if ee.ExitCode() != tc.killExitCode { + b.Fatalf("got exit code %d, wanted %d", ee.ExitCode(), tc.killExitCode) + } + } + b.StopTimer() + + testcontainer.Kill(ctx, b, c) + testcontainer.Wait(ctx, b, c) + cleanup() + cancel() + } + }) + + b.Run("Exec", func(b *testing.B) { + b.StopTimer() + b.ResetTimer() + + var vm *uvm.UtilityVM + if tc.createUVM { + vm = testuvm.CreateAndStart(pCtx, b, defaultWCOWOptions(pCtx, b)) + } + + for i := 0; i < b.N; i++ { + ctx, cancel := context.WithTimeout(pCtx, benchmarkIterationTimeout) + + id := criutil.GenerateID() + scratch := testlayers.WCOWScratchDir(ctx, b, "") + spec := testoci.CreateWindowsSpec(ctx, b, id, + testoci.DefaultWindowsSpecOpts(id, + append(tc.extraOpts, + ctrdoci.WithProcessCommandLine(testoci.PingSelfCmd), + testoci.WithWindowsLayerFolders(append(ls, scratch)), + )..., + )...) 
+ + c, _, cleanup := testcontainer.Create(ctx, b, vm, spec, id, hcsOwner) + init := testcontainer.StartWithSpec(ctx, b, c, spec.Process, nil) + + ps := testoci.CreateWindowsSpec(ctx, b, id, + testoci.DefaultWindowsSpecOpts(id, + append(tc.extraOpts, + ctrdoci.WithProcessCommandLine(testoci.PingSelfCmd), + testoci.WithWindowsLayerFolders(append(ls, scratch)), + )..., + )...).Process + + exec := testcmd.Create(ctx, b, c, ps, nil) + + b.StartTimer() + if err := exec.Start(); err != nil { + b.Fatalf("failed to start %q: %v", strings.Join(exec.Spec.Args, " "), err) + } + b.StopTimer() + + testcmd.Kill(ctx, b, exec) + testcmd.WaitExitCode(ctx, b, exec, tc.killExitCode) + + testcmd.Kill(ctx, b, init) + testcmd.WaitExitCode(ctx, b, init, tc.killExitCode) + testcontainer.Kill(ctx, b, c) + testcontainer.Wait(ctx, b, c) + cleanup() + cancel() + } + }) + + b.Run("ExecSync", func(b *testing.B) { + b.StopTimer() + b.ResetTimer() + + var vm *uvm.UtilityVM + if tc.createUVM { + vm = testuvm.CreateAndStart(pCtx, b, defaultWCOWOptions(pCtx, b)) + } + + for i := 0; i < b.N; i++ { + ctx, cancel := context.WithTimeout(pCtx, benchmarkIterationTimeout) + + id := criutil.GenerateID() + scratch := testlayers.WCOWScratchDir(ctx, b, "") + spec := testoci.CreateWindowsSpec(ctx, b, id, + testoci.DefaultWindowsSpecOpts(id, + append(tc.extraOpts, + ctrdoci.WithProcessCommandLine(testoci.PingSelfCmd), + testoci.WithWindowsLayerFolders(append(ls, scratch)), + )..., + )...) + + c, _, cleanup := testcontainer.Create(ctx, b, vm, spec, id, hcsOwner) + init := testcontainer.StartWithSpec(ctx, b, c, spec.Process, nil) + + ps := testoci.CreateWindowsSpec(ctx, b, id, + testoci.DefaultWindowsSpecOpts(id, + append(tc.extraOpts, + ctrdoci.WithProcessCommandLine("cmd /c (exit 0)"), + testoci.WithWindowsLayerFolders(append(ls, scratch)), + )..., + )...).Process + + exec := testcmd.Create(ctx, b, c, ps, nil) + + b.StartTimer() + if err := exec.Start(); err != nil { + b.Fatalf("failed to start %q: %v", strings.Join(exec.Spec.Args, " "), err) + } + if err := exec.Wait(); err != nil { + b.Fatalf("failed to wait on %q: %v", strings.Join(exec.Spec.Args, " "), err) + } + b.StopTimer() + + testcmd.Kill(ctx, b, init) + testcmd.WaitExitCode(ctx, b, init, tc.killExitCode) + testcontainer.Kill(ctx, b, c) + testcontainer.Wait(ctx, b, c) + cleanup() + cancel() + } + }) + + b.Run("ContainerKill", func(b *testing.B) { + b.StopTimer() + b.ResetTimer() + + var vm *uvm.UtilityVM + if tc.createUVM { + vm = testuvm.CreateAndStart(pCtx, b, defaultWCOWOptions(pCtx, b)) + } + + for i := 0; i < b.N; i++ { + ctx, cancel := context.WithTimeout(pCtx, benchmarkIterationTimeout) + + id := criutil.GenerateID() + scratch := testlayers.WCOWScratchDir(ctx, b, "") + spec := testoci.CreateWindowsSpec(ctx, b, id, + testoci.DefaultWindowsSpecOpts(id, + append(tc.extraOpts, + ctrdoci.WithProcessCommandLine("cmd /c (exit 0)"), + testoci.WithWindowsLayerFolders(append(ls, scratch)), + )..., + )...) 
+ + c, _, cleanup := testcontainer.Create(ctx, b, vm, spec, id, hcsOwner) + + init := testcontainer.StartWithSpec(ctx, b, c, spec.Process, nil) + testcmd.WaitExitCode(ctx, b, init, 0) + + b.StartTimer() + testcontainer.Kill(ctx, b, c) + testcontainer.Wait(ctx, b, c) + b.StopTimer() + + cleanup() + cancel() + } + }) + }) + } +} diff --git a/test/functional/wcow_test.go b/test/functional/wcow_uvm_test.go similarity index 100% rename from test/functional/wcow_test.go rename to test/functional/wcow_uvm_test.go diff --git a/test/internal/cmd/cmd.go b/test/internal/cmd/cmd.go index 8e61478919..d7bcc78a38 100644 --- a/test/internal/cmd/cmd.go +++ b/test/internal/cmd/cmd.go @@ -1,5 +1,6 @@ //go:build windows +// This package provides testing wrappers around [github.com/Microsoft/hcsshim/internal/cmd] package cmd import ( diff --git a/test/internal/cmd/io.go b/test/internal/cmd/io.go index 462ee410e6..8a5d05d5da 100644 --- a/test/internal/cmd/io.go +++ b/test/internal/cmd/io.go @@ -5,8 +5,11 @@ package cmd import ( "bytes" "errors" + "strings" "testing" + "github.com/google/go-cmp/cmp" + "github.com/Microsoft/hcsshim/internal/cmd" ) @@ -38,12 +41,45 @@ func (b *BufferedIO) Output() (_ string, err error) { func (b *BufferedIO) TestOutput(tb testing.TB, out string, err error) { tb.Helper() - outGive, errGive := b.Output() - if !errors.Is(errGive, err) { - tb.Fatalf("got stderr: %v; wanted: %v", errGive, err) + outGot, errGot := b.Output() + if !errors.Is(errGot, err) { + tb.Fatalf("got stderr: %v; wanted: %v", errGot, err) + } + + out = strings.ToLower(strings.TrimSpace(out)) + outGot = strings.ToLower(strings.TrimSpace(outGot)) + if diff := cmp.Diff(out, outGot); diff != "" { + tb.Fatalf("stdout mismatch (-want +got):\n%s", diff) + } +} + +func (b *BufferedIO) TestStdOutContains(tb testing.TB, want, notWant []string) { + tb.Helper() + + outGot, err := b.Output() + if err != nil { + tb.Fatalf("got stderr: %v", err) } - if outGive != out { - tb.Fatalf("got stdout %q; wanted %q", outGive, out) + + tb.Logf("searching stdout for substrings\nstdout:\n%s\nwanted substrings:\n%q\nunwanted substrings:\n%q", outGot, want, notWant) + + outGot = strings.ToLower(outGot) + + for _, s := range want { + if !strings.Contains(outGot, strings.ToLower(s)) { + tb.Errorf("stdout does not contain substring:\n%s", s) + } + } + + for _, s := range notWant { + if strings.Contains(outGot, strings.ToLower(s)) { + tb.Errorf("stdout contains substring:\n%s", s) + } + } + + // FailNow() to match behavior of [TestOutput] + if tb.Failed() { + tb.FailNow() } } diff --git a/test/internal/oci/oci.go b/test/internal/oci/oci.go index 197bf94cf0..e82edd02d0 100644 --- a/test/internal/oci/oci.go +++ b/test/internal/oci/oci.go @@ -12,6 +12,8 @@ import ( criopts "github.com/containerd/containerd/pkg/cri/opts" "github.com/opencontainers/runtime-spec/specs-go" + "github.com/Microsoft/hcsshim/pkg/annotations" + "github.com/Microsoft/hcsshim/test/pkg/images" ) @@ -21,6 +23,7 @@ import ( const ( TailNullArgs = "tail -f /dev/null" + PingSelfCmd = "cmd.exe /c ping -t 127.0.0.1" DefaultNamespace = namespaces.Default CRINamespace = criconstants.K8sContainerdNamespace @@ -42,6 +45,22 @@ func DefaultLinuxSpecOpts(nns string, extra ...ctrdoci.SpecOpts) []ctrdoci.SpecO return append(opts, extra...) 
}
 
+func DefaultWindowsSpecOpts(nns string, extra ...ctrdoci.SpecOpts) []ctrdoci.SpecOpts {
+	opts := []ctrdoci.SpecOpts{
+		// make sure we set the Windows field
+		func(_ context.Context, _ ctrdoci.Client, _ *containers.Container, s *specs.Spec) error {
+			if s.Windows == nil {
+				s.Windows = &specs.Windows{}
+			}
+			return nil
+		},
+		criopts.WithoutRoot,
+		ctrdoci.WithProcessCwd(`C:\`),
+		ctrdoci.WithWindowsNetworkNamespace(nns),
+	}
+	return append(opts, extra...)
+}
+
 // DefaultLinuxSpec returns a default OCI spec for a Linux container.
 //
 // See [CreateSpecWithPlatform] for more details.
@@ -95,3 +114,26 @@ func WithWindowsLayerFolders(layers []string) ctrdoci.SpecOpts {
 		return nil
 	}
 }
+
+// AsHostProcessContainer updates the spec to create a HostProcess container.
+func AsHostProcessContainer() ctrdoci.SpecOpts {
+	return func(_ context.Context, _ ctrdoci.Client, _ *containers.Container, s *specs.Spec) error {
+		if s.Annotations == nil {
+			s.Annotations = make(map[string]string)
+		}
+		s.Annotations[annotations.HostProcessContainer] = "true"
+		return nil
+	}
+}
+
+// HostProcessInheritUser updates the spec to allow the HostProcess container to inherit the current
+// user's token.
+func HostProcessInheritUser() ctrdoci.SpecOpts {
+	return func(_ context.Context, _ ctrdoci.Client, _ *containers.Container, s *specs.Spec) error {
+		if s.Annotations == nil {
+			s.Annotations = make(map[string]string)
+		}
+		s.Annotations[annotations.HostProcessInheritUser] = "true"
+		return nil
+	}
+}
diff --git a/test/pkg/uvm/uvm.go b/test/pkg/uvm/uvm.go
index 2cd0a36d51..817679a7d4 100644
--- a/test/pkg/uvm/uvm.go
+++ b/test/pkg/uvm/uvm.go
@@ -25,6 +25,38 @@ func newCleanupFn(_ context.Context, tb testing.TB, vm *uvm.UtilityVM) CleanupFn
 	}
 }
 
+// TODO: create an interface in "internal/uvm" that both [OptionsLCOW] and [OptionsWCOW] implement.
+//
+// Can't use a generic interface{ OptionsLCOW | OptionsWCOW }, since that is a type constraint and
+// requires making all calls generic as well (see the sketch below).
+
+// Create creates a utility VM with the passed opts.
+func Create(ctx context.Context, tb testing.TB, opts any) (*uvm.UtilityVM, CleanupFn) {
+	tb.Helper()
+
+	switch opts := opts.(type) {
+	case *uvm.OptionsLCOW:
+		return CreateLCOW(ctx, tb, opts)
+	case *uvm.OptionsWCOW:
+		return CreateWCOW(ctx, tb, opts)
+	}
+	tb.Fatalf("unknown uVM creation options: %T", opts)
+	return nil, nil
+}
+
+// CreateAndStart creates and starts a utility VM with the specified options.
+//
+// The cleanup function will be added to `tb.Cleanup`.
+func CreateAndStart(ctx context.Context, tb testing.TB, opts any) *uvm.UtilityVM {
+	tb.Helper()
+
+	vm, cleanup := Create(ctx, tb, opts)
+	Start(ctx, tb, vm)
+	tb.Cleanup(func() { cleanup(ctx) })
+
+	return vm
+}
+
 func Start(ctx context.Context, tb testing.TB, vm *uvm.UtilityVM) {
 	tb.Helper()
 	err := vm.Start(ctx)
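// The sketch below illustrates the limitation noted in the TODO for [Create]: a union such as
// interface{ *uvm.OptionsLCOW | *uvm.OptionsWCOW } is a type set, so it can only bound a type
// parameter, never type an ordinary value; a generic Create therefore forces every caller (and any
// test table storing the options, like the `createOpts func(...) any` fields in TestUVM and
// BenchmarkUVM) to become generic too. This is an illustrative sketch only; `optionsConstraint` and
// `createGeneric` are hypothetical names, not helpers added by this patch.

// optionsConstraint is a hypothetical type-set constraint over the two supported options types.
type optionsConstraint interface {
	*uvm.OptionsLCOW | *uvm.OptionsWCOW
}

// createGeneric is what a generic Create would look like: the concrete options type must be known
// at the call site (or propagated through yet another type parameter), which is exactly the
// "making all calls generic" cost the TODO describes; hence Create takes `any` and type-switches.
func createGeneric[T optionsConstraint](ctx context.Context, tb testing.TB, opts T) (*uvm.UtilityVM, CleanupFn) {
	tb.Helper()

	switch opts := any(opts).(type) {
	case *uvm.OptionsLCOW:
		return CreateLCOW(ctx, tb, opts)
	case *uvm.OptionsWCOW:
		return CreateWCOW(ctx, tb, opts)
	}
	return nil, nil // unreachable: the constraint limits T to the two cases above
}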