Unverified Commit da7bf3c9 authored by Aaron Peikert's avatar Aaron Peikert
Browse files

Dockerized example of future-based tardis interaction

parents
#!/bin/bash
## batchtools Slurm job template, rendered with brew: text inside <% %> is
## R code evaluated at submit time, and <%= %> inserts an expression's value.
## The rendered result is the script that actually gets submitted via sbatch.
##
## Job Resource Interface Definition
##
## ntasks [integer(1)]: Number of required tasks,
## Set larger than 1 if you want to further parallelize
## with MPI within your job.
## ncpus [integer(1)]: Number of required cpus per task,
## Set larger than 1 if you want to further parallelize
## with multicore/parallel within each task.
## walltime [integer(1)]: Walltime for this job, in seconds.
## Must be at least 60 seconds for Slurm to work properly.
## memory [integer(1)]: Memory in megabytes for each cpu.
## Must be at least 100 (when I tried lower values my
## jobs did not start at all).
##
## Default resources can be set in your .batchtools.conf.R by defining the variable
## 'default.resources' as a named list.
<%
# relative paths are not handled well by Slurm, so expand the log path first
log.file = fs::path_expand(log.file)
-%>
#SBATCH --job-name=<%= job.name %>
#SBATCH --output=<%= log.file %>
#SBATCH --error=<%= log.file %>
## walltime is specified in seconds, but Slurm --time takes minutes: round up
#SBATCH --time=<%= ceiling(resources$walltime / 60) %>
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=<%= resources$ncpus %>
#SBATCH --mem-per-cpu=<%= resources$memory %>
## Emit a --partition directive only when one was requested.
## NOTE(review): sprintf() here has no format placeholders, so it just
## returns the paste0() string unchanged — paste0 alone would suffice.
<%= if (!is.null(resources$partition)) sprintf(paste0("#SBATCH --partition='", paste(resources$partition, collapse = ","), "'")) %>
<%= if (array.jobs) sprintf("#SBATCH --array=1-%i", nrow(jobs)) else "" %>
## Initialize work environment like
## source /etc/profile
## module add ...
## Export value of DEBUGME environment var to the worker
export DEBUGME=<%= Sys.getenv("DEBUGME") %>
## NOTE(review): the trailing -%> suppresses the newline after each chunk;
## presumably these exports disappear from the rendered script when the
## corresponding resource is NULL (sprintf on NULL yields character(0)) —
## confirm against the brew/batchtools docs.
<%= sprintf("export OMP_NUM_THREADS=%i", resources$omp.threads) -%>
<%= sprintf("export OPENBLAS_NUM_THREADS=%i", resources$blas.threads) -%>
<%= sprintf("export MKL_NUM_THREADS=%i", resources$blas.threads) -%>
## Run R inside the project's Singularity image:
## we merge R output with stdout from SLURM, which gets then logged via --output option
singularity exec /home/mpib/peikert/repro-tutorial/reprotutorial.sif Rscript -e 'batchtools::doJobCollection("<%= uri %>")'
# Build artifacts: exported Docker tarballs and Singularity images
*.tar.gz
*.sif
# RStudio / R session state
.Rproj.user
.Rhistory
.Rdata
# Cached httr OAuth token — credentials must never be committed
.httr-oauth
# macOS Finder metadata
.DS_Store
# R 4.0.3 base image; the version is pinned to match the cluster-side R
# (/opt/software/R/4.0.3) so that future.batchtools workers interoperate.
FROM rocker/r-ver:4.0.3
# NOTE(review): BUILD_DATE is declared but never referenced in this file;
# presumably intended to bust the Docker layer cache — confirm.
ARG BUILD_DATE=2021-06-01
WORKDIR /home/rstudio
# Install the future-based parallelization stack from CRAN.
# --error fails the build on any install error; --skipinstalled makes
# re-running the layer idempotent.
RUN install2.r --error --skipinstalled \
furrr \
future \
future.batchtools \
future.apply \
remotes
# Pin semtree to a specific commit for reproducibility.
RUN installGithub.r \
brandmaier/semtree@189b181
# Build the project's Docker image, optionally export it as a tarball,
# and convert it to a Singularity image for use on the Slurm cluster.
#
# Fixes relative to the previous version: the build/rebuild/save-docker
# rule set was defined twice (GNU make warns "overriding recipe for
# target"), and one recipe line was fused with the following rule header
# ("... > $@docker: build"), which is a syntax error. Rules are now
# declared exactly once and command-style targets are marked .PHONY.

PROJECT := maketest

# These targets are commands, not files they produce.
.PHONY: docker build rebuild save-docker singularity

# Default entry point: build the Docker image.
docker: build

# Build the image; listing Dockerfile documents the dependency
# (build is phony, so this always re-runs when invoked).
build: Dockerfile
	docker build -t $(PROJECT) .

# Rebuild from scratch, bypassing the Docker layer cache.
rebuild:
	docker build --no-cache -t $(PROJECT) .

save-docker: $(PROJECT).tar.gz

# Export the tagged image as a compressed tarball.
$(PROJECT).tar.gz:
	docker save $(PROJECT):latest | gzip > $@

singularity: $(PROJECT).sif

# Convert the image from the local Docker daemon into a Singularity image.
$(PROJECT).sif: docker
	singularity build $@ docker-daemon://$(PROJECT):latest
Version: 1.0
RestoreWorkspace: Default
SaveWorkspace: Default
AlwaysSaveHistory: Default
EnableCodeIndexing: Yes
UseSpacesForTab: Yes
NumSpacesForTab: 2
Encoding: UTF-8
RnwWeave: knitr
LaTeX: XeLaTeX
AutoAppendNewline: Yes
BuildType: Makefile
# Example: run computations on the tardis Slurm cluster from a local R
# session via a three-level future topology (local -> cluster login node
# -> Slurm jobs -> multicore within each job).
library(future)
library(future.apply)
library(future.batchtools)
# Open an SSH-backed PSOCK worker on the cluster login node.
# port = 'random' lets parallelly pick a free reverse-tunnel port;
# rscript points at the remote R installation.
tardis <- parallelly::makeClusterPSOCK("tardis.mpib-berlin.mpg.de",
port='random', user="peikert",
# this R version does not matter
# it only requires that future.batchtools is there
rscript=c("/opt/software/R/4.0.3/bin/Rscript"),
homogeneous = TRUE)
# should be 1 if no hierarchical futures are used
ncpus <- 1
# Nested plan: level 1 dispatches to the login node, level 2 submits
# Slurm jobs via the Singularity-aware template, level 3 runs multicore
# inside each job (with ncpus = 1 this is effectively sequential).
plan(list(tweak(cluster, workers=tardis),
tweak(batchtools_slurm,
workers = 100, #number of tasks that may run parallel on tardis
# the R/package version of the singularity image has to match
# the local machine, therefore check Dockerfile for R version
template = "/home/mpib/peikert/.batchtools.slurm.singularity.tmpl",
resources=list(ncpus=ncpus,
memory='700m',
walltime=6600,
partition=c('gpu'))),
tweak(multicore, workers=ncpus)))
# Smoke test: %<-% evaluates the RHS as a future on a remote worker.
# NOTE(review): presumably this just checks the topology is reachable by
# round-tripping sessionInfo() from a worker — confirm intent.
pkgs %<-% sessionInfo()
# Compute x^x for x in 1:10 across the Slurm workers (`**` is R's
# alternative spelling of the `^` exponentiation operator).
x %<-% future_sapply( 1:10, FUN=function(x){x**x})
# Referencing x blocks until the future resolves, then prints the result.
x
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment