How to get --machine-file for array tasks?

I have the following sbatch.sub script which defines a job array where each array task is meant to call run.jl with its respective Slurm nodefile (please see below). This produces outputs for both array iterations but they say something like:

ex_array_job.156026: No such file or directory

So I have two options:

  1. How can I fix this so --machine-file is correct?
  2. How (if not via --machine-file) do I pass resources to julia for each array iteration?

sbatch.sub:

#!/bin/bash
#SBATCH -A me
#SBATCH --job-name="ex_array_job"
#SBATCH --output="ex_array_job.%j.%N.out"
#SBATCH --partition=standard
#SBATCH -t 01:00:00
#SBATCH --nodes=4               ## total nodes for the job
#SBATCH --ntasks=80             ## number of processes to launch for each array iteration
#SBATCH --cpus-per-task=1       ## number of cores the job needs
#SBATCH --array=1-2             ## number of array tasks is 2 with 80 jobs (2 nodes per job) per array iteration (2 iterations)

# Build a nodefile for this job and hand it to Julia as its machine file.
# NOTE(review): whether generate_pbs_nodefile emits a nodefile scoped to this
# array task (vs. the whole array) is site-specific — confirm with the cluster docs.
# Use $(...) instead of backticks and quote the path so it is passed intact.
export SLURM_NODEFILE="$(generate_pbs_nodefile)"
/opt/apps/julia/1.5.1/bin/julia --machine-file "$SLURM_NODEFILE" /dfs/me/code/scratch/slurm/arrayJob/run.jl

run.jl:

# Determine which array task we are, then pull in that task's config file.
aId = parse(Int, ENV["SLURM_ARRAY_TASK_ID"])
cfg = "/dfs/me/code/scratch/slurm/arrayJob/config.$aId.jl"
include(cfg)
# The included config is expected to define `pdir::String`
# (the project directory to activate).
using Pkg
Pkg.activate(pdir)
using ClusterManagers
using Distributed

# Replicate the project environment on every worker process.
# `@everywhere` interpolates local values with `$`, so `$cfg` already expands
# to the config path string on each worker; the original `include(eval($cfg))`
# wrapped it in a redundant `eval` (evaluating a String returns the String).
@everywhere using Pkg
@everywhere include($cfg)          # defines `pdir` on each worker
@everywhere Pkg.activate(pdir)
@everywhere Pkg.instantiate()
@everywhere using ClusterManagers

# Ask each worker for its hostname and OS pid so we can see which nodes
# this array task actually landed on. Use concretely typed vectors:
# untyped `[]` creates a Vector{Any}, which defeats specialization.
hosts = String[]
pids = Int[]
for i in workers()
	host, pid = fetch(@spawnat i (gethostname(), getpid()))
	push!(hosts, host)
	push!(pids, pid)
end

# Report how many processes this array task is running with.
println("\n nprocs: \n")
display(nprocs())

# Remove every worker; Slurm releases the allocation once they have all exited.
for w in workers()
	rmprocs(w)
end

# Dump the per-worker hostnames and pids collected above.
println("\n array: $aId hosts are: \n")
display(hosts)
println("\n array: $aId pids are: \n")
display(pids)