Difference between revisions of "SLURM"
From wiki.hpc.mk
Line 6: | Line 6: | ||
<nowiki>#</nowiki>!/bin/bash | <nowiki>#</nowiki>!/bin/bash | ||
{| class="wikitable" | |||
<nowiki>#</nowiki>SBATCH --ntasks-per-node=2 | |+ | ||
<nowiki>#</nowiki>SBATCH --time=1:00:00 | |- | ||
<nowiki>#</nowiki>SBATCH --job-name=test_job | !Parameters!!Description | ||
<nowiki>#</nowiki>SBATCH --mem=1G | |- | ||
<nowiki>#</nowiki>SBATCH --error=testerror_%j.error | |<nowiki>#</nowiki>SBATCH --ntasks-per-node=2 ||# Number of tasks to launch on each node | ||
<nowiki>#</nowiki>SBATCH --cpus-per-task=1 | |- | ||
<nowiki>#</nowiki>SBATCH --output=testoutput_%j.out | |<nowiki>#</nowiki>SBATCH --time=1:00:00||# Script duration (days-hrs:min:sec) | ||
<nowiki>#</nowiki>SBATCH --gres=gpu:2 | |- | ||
<nowiki>#</nowiki>SBATCH --nodelist=cuda4 | |<nowiki>#</nowiki>SBATCH --job-name=test_job|| # Job name | ||
|- | |||
|<nowiki>#</nowiki>SBATCH --mem=1G||# RAM memory allocated for the job (e.g. 1G, 2G, 4G) | |||
|- | |||
|<nowiki>#</nowiki>SBATCH --error=testerror_%j.error||# Print the errors that occur when executing the job | |||
|- | |||
|<nowiki>#</nowiki>SBATCH --cpus-per-task=1||# Number of processors required for a single task | |||
|- | |||
|<nowiki>#</nowiki>SBATCH --output=testoutput_%j.out||# Print the results from scripts and the values it returns | |||
|- | |||
|<nowiki>#</nowiki>SBATCH --gres=gpu:2||# Number of GPU cards per node allocated to the job | |||
|- | |||
|<nowiki>#</nowiki>SBATCH --nodelist=cuda4 || # Execute on specific nodes; e.g. cuda4 restricts the job to the cuda4 host | |||
|} |
Revision as of 12:33, 27 August 2021
Initiate and manage SLURM tasks
Most used parameters:
#!/bin/bash
Parameters | Description |
---|---|
#SBATCH --ntasks-per-node=2 | # Number of tasks to launch on each node |
#SBATCH --time=1:00:00 | # Script duration (days-hrs:min:sec) |
#SBATCH --job-name=test_job | # Job name |
#SBATCH --mem=1G | # RAM memory allocated for the job (e.g. 1G, 2G, 4G) |
#SBATCH --error=testerror_%j.error | # Print the errors that occur when executing the job |
#SBATCH --cpus-per-task=1 | # Number of processors required for a single task |
#SBATCH --output=testoutput_%j.out | # Print the results from scripts and the values it returns |
#SBATCH --gres=gpu:2 | # Number of GPU cards per node allocated to the job |
#SBATCH --nodelist=cuda4 | # Execute on specific nodes; e.g. cuda4 restricts the job to the cuda4 host |