diff --git a/docs/tutorials/openfoam/spack-openfoam.md b/docs/tutorials/openfoam/spack-openfoam.md
index 4342ab8a5e..2fc4d51387 100644
--- a/docs/tutorials/openfoam/spack-openfoam.md
+++ b/docs/tutorials/openfoam/spack-openfoam.md
@@ -75,7 +75,7 @@ which should be open in the Cloud Shell Editor (on the left).
This file describes the cluster you will deploy. It defines:
-* the existing default network from your project
+* a VPC network
* a monitoring dashboard with metrics on your cluster
* a definition of a custom Spack installation
* a startup script that
@@ -135,16 +135,16 @@ controller. This command can be used to view progress and check for completion
of the startup script:
```bash
-gcloud compute instances get-serial-port-output --port 1 --zone us-central1-c slurm-spack-openfoam-controller | grep google_metadata_script_runner
+gcloud compute instances get-serial-port-output --port 1 --zone us-central1-c spackopenf-controller | grep google_metadata_script_runner
```
When the startup script has finished running, you will see the following line as
the final output from the above command:
-> _`slurm-spack-openfoam-controller google_metadata_script_runner: Finished running startup scripts.`_
+> _`spackopenf-controller google_metadata_script_runner: Finished running startup scripts.`_
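+
+If you would rather not re-run the command by hand, a small polling loop (a
+sketch; the zone and instance name assume the deployment above) waits until
+that line appears:
+
+```bash
+until gcloud compute instances get-serial-port-output --port 1 \
+    --zone us-central1-c spackopenf-controller 2>/dev/null \
+  | grep -q "Finished running startup scripts"; do
+  sleep 30  # the serial log grows as the startup script progresses
+done
+```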
Optionally, while you wait, you can view your deployed VMs in the Google Cloud
Console. Open the link below in a new window. Look for
-`slurm-spack-openfoam-controller`. If you don't
+`spackopenf-controller`. If you don't
see your VMs, make sure you have the correct project selected (top left).
```text
@@ -204,7 +204,7 @@ OpenFOAM job.
2. Submit the job to Slurm to be scheduled:
```bash
- sbatch /apps/openfoam/submit_openfoam.sh
+ sbatch /opt/apps/openfoam/submit_openfoam.sh
```
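+
+   On success, `sbatch` prints the assigned job ID. If you want more detail
+   than `squeue` provides, you can inspect the job (a sketch; substitute
+   the ID you were given):
+
+   ```bash
+   scontrol show job <jobid>
+   ```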
3. Once submitted, you can watch the job progress by repeatedly calling the
@@ -218,7 +218,7 @@ The `sbatch` command triggers Slurm to auto-scale up several nodes to run the job
You can refresh the `Compute Engine` > `VM instances` page and see that
additional VMs are being/have been created. These will be named something like
-`slurm-spack-openfoam-compute-0-0`.
+`spackopenf-comput-0`.
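+
+From Cloud Shell you can also list them directly (a sketch; the filter
+assumes the `spackopenf` deployment name used above):
+
+```bash
+gcloud compute instances list --filter="name ~ '^spackopenf'"
+```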
When running `squeue`, observe the job status start as `CF` (configuring),
change to `R` (running) once the compute VMs have been created, and finally `CG`
@@ -271,7 +271,7 @@ exit
Run the following command in the Cloud Shell terminal to destroy the cluster:
```bash
-./ghpc deploy spack-openfoam
+./ghpc destroy spack-openfoam
```
When complete you should see something like:
diff --git a/docs/tutorials/openfoam/spack-openfoam.yaml b/docs/tutorials/openfoam/spack-openfoam.yaml
index c15851fe17..bd2ec7dc70 100644
--- a/docs/tutorials/openfoam/spack-openfoam.yaml
+++ b/docs/tutorials/openfoam/spack-openfoam.yaml
@@ -35,7 +35,7 @@ deployment_groups:
- id: spack-setup
source: community/modules/scripts/spack-setup
settings:
- install_dir: /apps/spack
+ install_dir: /opt/apps/spack
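+      # assumption: slurm-gcp v5 exports /opt/apps from the controller over
+      # NFS, so a single Spack install is visible on every compute node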
spack_ref: v0.20.0
- id: spack-execute
@@ -95,7 +95,7 @@ deployment_groups:
# fi
# spack buildcache keys --install --trust
- spack config --scope defaults add config:build_stage:/apps/spack/spack-stage
+ spack config --scope defaults add config:build_stage:/opt/apps/spack/spack-stage
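+        # build_stage is the scratch area Spack compiles in; keeping it
+        # under the relocated /opt/apps root keeps all Spack state together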
spack config --scope defaults add -f /tmp/projections-config.yaml
spack config --scope site add -f /tmp/slurm-external-config.yaml
@@ -124,17 +124,16 @@ deployment_groups:
destination: setup_openfoam.sh
content: |
#!/bin/bash
- source /apps/spack/share/spack/setup-env.sh
+ source /opt/apps/spack/share/spack/setup-env.sh
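+          # setup-env.sh puts the `spack` command on PATH for this shell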
spack env activate openfoam
- chmod -R a+rwX /apps/spack/var/spack/environments/openfoam
- type: data
- destination: /apps/openfoam/submit_openfoam.sh
+ destination: /opt/apps/openfoam/submit_openfoam.sh
content: |
#!/bin/bash
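+          # request 2 nodes with 30 tasks per node (60 tasks total)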
#SBATCH -N 2
#SBATCH --ntasks-per-node 30
- source /apps/spack/share/spack/setup-env.sh
+ source /opt/apps/spack/share/spack/setup-env.sh
spack env activate openfoam
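+          # SLURM_SUBMIT_DIR is set by Slurm to the directory sbatch ran from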
cd $SLURM_SUBMIT_DIR