#!/usr/bin/env expect
############################################################################
# Purpose: Test of Slurm functionality
#          Confirm that a job executes with the proper node count
#          (--nodes option).
#
# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
#          "WARNING: ..." with an explanation of why the test can't be made, OR
#          "FAILURE: ..." otherwise with an explanation of the failure, OR
#          anything else indicates a failure mode that must be investigated.
############################################################################
# Copyright (C) 2002-2007 The Regents of the University of California.
# Copyright (C) 2008-2010 Lawrence Livermore National Security.
# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
# Written by Morris Jette
# CODE-OCEC-09-009. All rights reserved.
#
# This file is part of Slurm, a resource management program.
# For details, see <https://slurm.schedmd.com/>.
# Please also read the included file: DISCLAIMER.
#
# Slurm is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along
# with Slurm; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
############################################################################
source ./globals

set test_id   "15.19"
set exit_code 0

print_header $test_id

#
# Submit a 1 node job and validate that we don't get more than one
#
set job_id   0
set host_0   ""
set task_cnt 0

set timeout $max_job_delay
set salloc_pid [spawn $salloc -N1-1 -t1 $srun -c 1 -l $bin_printenv SLURMD_NODENAME]
expect {
    -re "Granted job allocation ($number)" {
        set job_id $expect_out(1,string)
        exp_continue
    }
    -re "(Task count specification invalid|configuration not available)" {
        if { [test_front_end] } {
            send_user "\nWARNING: error expected, testing is incompatible with front-end systems\n"
        } else {
            send_user "\nWARNING: test not compatible with this configuration\n"
        }
        if {$job_id != 0} {
            cancel_job $job_id
        }
        exit $exit_code
    }
    -re "($number): *($alpha_numeric_under)" {
        if {$task_cnt == 0} {
            set host_0 $expect_out(2,string)
        }
        incr task_cnt
        exp_continue
    }
    timeout {
        send_user "\nFAILURE: salloc not responding\n"
        if {$job_id != 0} {
            cancel_job $job_id
        }
        slow_kill [expr 0 - $salloc_pid]
        set exit_code 1
    }
    eof {
        wait
    }
}
if {[string compare $host_0 ""] == 0} {
    send_user "\nFAILURE: Did not get SLURMD_NODENAME of task 0\n"
    set exit_code 1
    exit $exit_code
}

#
# Name an invalid node while requesting more tasks than the node has
# processors, and confirm that the allocation fails as expected
#
set alloc_fail 0
set job_id     0
set task_cnt2  0
set salloc_pid [spawn $salloc -N1-1 -w DUMMY_HOSTNAME -t1 $srun -n [expr $task_cnt + 1] -l $bin_printenv SLURMD_NODENAME]
expect {
    -re "Granted job allocation ($number)" {
        set job_id $expect_out(1,string)
        exp_continue
    }
    -re "($number):" {
        incr task_cnt2
        exp_continue
    }
    -re "Invalid node name specified" {
        send_user "\nThis error is expected, no worries\n"
        set alloc_fail 1
    }
    timeout {
        send_user "\nFAILURE: salloc not responding\n"
        if {$job_id != 0} {
            cancel_job $job_id
        }
        slow_kill [expr 0 - $salloc_pid]
        set exit_code 1
    }
    eof {
        wait
    }
}
if { $alloc_fail == 0 } {
    send_user "\nFAILURE: Did not generate expected error message\n"
    set exit_code 1
}
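# Diagnostic added for readability (not part of the original test logic):
# $task_cnt holds the task count measured on a single node by the first
# job, which the oversubscribed request above exceeded by one on purpose.
send_user "\nNote: first job ran $task_cnt task(s) on $host_0; oversubscribed job started $task_cnt2 task(s)\n"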
if { $task_cnt2 != 0 } {
    send_user "\nFAILURE: allocated more tasks than processors\n"
    set exit_code 1
}

#
# Submit a 1 node job
#
set job_id 0
set host_0 ""
set host_1 ""
set salloc_pid [spawn $salloc -N1-1 -t1 $srun -l $bin_printenv SLURMD_NODENAME]
expect {
    -re "Granted job allocation ($number)" {
        set job_id $expect_out(1,string)
        exp_continue
    }
    -re "($number): *($alpha_numeric_under)" {
        if {$expect_out(1,string) == 0} {
            set host_0 $expect_out(2,string)
        }
        if {$expect_out(1,string) == 1} {
            set host_1 $expect_out(2,string)
        }
        exp_continue
    }
    timeout {
        send_user "\nFAILURE: salloc not responding\n"
        if {$job_id != 0} {
            cancel_job $job_id
        }
        slow_kill [expr 0 - $salloc_pid]
        set exit_code 1
    }
    eof {
        wait
    }
}

#
# Verify node count
#
if {[string compare $host_0 ""] == 0} {
    send_user "\nFAILURE: Did not get SLURMD_NODENAME of task 0\n"
    set exit_code 1
}
if {[string compare $host_1 ""] != 0} {
    send_user "\nFAILURE: Started two tasks instead of one\n"
    set exit_code 1
}
if {[test_front_end] != 0} {
    send_user "\nWARNING: Additional testing is incompatible with front-end systems\n"
    exit $exit_code
}

#
# Submit a 1 to 3 node job
#
set job_id 0
set host_0 ""
set host_1 ""
set host_2 ""
set host_3 ""
set timeout $max_job_delay
set salloc_pid [spawn $salloc -N1-3 -t1 $srun -l $bin_printenv SLURMD_NODENAME]
expect {
    -re "Granted job allocation ($number)" {
        set job_id $expect_out(1,string)
        exp_continue
    }
    -re "($number): *($alpha_numeric_under)" {
        if {$expect_out(1,string) == 0} {
            set host_0 $expect_out(2,string)
        }
        if {$expect_out(1,string) == 1} {
            set host_1 $expect_out(2,string)
        }
        if {$expect_out(1,string) == 2} {
            set host_2 $expect_out(2,string)
        }
        if {$expect_out(1,string) == 3} {
            set host_3 $expect_out(2,string)
        }
        exp_continue
    }
    timeout {
        send_user "\nFAILURE: salloc not responding\n"
        if {$job_id != 0} {
            cancel_job $job_id
        }
        slow_kill [expr 0 - $salloc_pid]
        set exit_code 1
    }
    eof {
        wait
    }
}

#
# Verify node count
#
if {[string compare $host_0 ""] == 0} {
    send_user "\nFAILURE: Did not get SLURMD_NODENAME of task 0\n"
    set exit_code 1
}
if {[string compare $host_3 ""] != 0} {
    send_user "\nFAILURE: Started more than three tasks\n"
    set exit_code 1
}
set dup_host 0
if {[string compare $host_0 $host_1] == 0} {
    set dup_host 1
}
if {[string compare $host_0 $host_2] == 0} {
    set dup_host 1
}
if {[string compare $host_0 $host_3] == 0} {
    set dup_host 1
}
if {$dup_host == 1} {
    send_user "\nFAILURE: Re-used a node in the allocation\n"
    set exit_code 1
}

# Find out whether we have enough nodes to test a multi-node allocation
set partition [default_partition]
set node_count [get_node_cnt_in_part $partition]
if { $node_count < 2 } {
    send_user "WARNING: Insufficient nodes in default partition to continue ($node_count < 2)\n"
    if {$exit_code == 0} {
        send_user "\nSUCCESS\n"
    }
    exit $exit_code
}

#
# Submit a 2 to 3 node job
#
set job_id 0
set host_0 ""
set host_1 ""
set host_2 ""
set host_3 ""
set timeout $max_job_delay
set salloc_pid [spawn $salloc -N2-3 -t1 $srun -l $bin_printenv SLURMD_NODENAME]
expect {
    -re "Granted job allocation ($number)" {
        set job_id $expect_out(1,string)
        exp_continue
    }
    -re "salloc: error" {
        send_user "\nWARNING: can't test salloc task distribution\n"
        if {$job_id != 0} {
            cancel_job $job_id
        }
        exit $exit_code
    }
    -re "($number): *($alpha_numeric_under)" {
        if {$expect_out(1,string) == 0} {
            set host_0 $expect_out(2,string)
        }
        if {$expect_out(1,string) == 1} {
            set host_1 $expect_out(2,string)
        }
        if {$expect_out(1,string) == 2} {
            set host_2 $expect_out(2,string)
        }
        if {$expect_out(1,string) == 3} {
            set host_3 $expect_out(2,string)
        }
        exp_continue
    }
    timeout {
        send_user "\nFAILURE: salloc not responding\n"
        if {$job_id != 0} {
            cancel_job $job_id
        }
        slow_kill [expr 0 - $salloc_pid]
        set exit_code 1
    }
    eof {
        wait
    }
}
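# Diagnostic added for readability (not part of the original test logic):
# show which host each task reported before validating the node count.
send_user "\nNote: 2-3 node job reported hosts: task0=$host_0 task1=$host_1 task2=$host_2 task3=$host_3\n"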
#
# Verify node count
#
if {[string compare $host_0 ""] == 0} {
    send_user "\nFAILURE: Did not get SLURMD_NODENAME of task 0\n"
    set exit_code 1
}
if {[string compare $host_1 ""] == 0} {
    send_user "\nFAILURE: Did not get SLURMD_NODENAME of task 1\n"
    set exit_code 1
}
if {[string compare $host_3 ""] != 0} {
    send_user "\nFAILURE: Started more than three tasks\n"
    set exit_code 1
}
set dup_host 0
if {[string compare $host_0 $host_1] == 0} {
    set dup_host 1
}
if {[string compare $host_0 $host_2] == 0} {
    set dup_host 1
}
if {[string compare $host_0 $host_3] == 0} {
    set dup_host 1
}
if {$dup_host == 1} {
    send_user "\nFAILURE: Re-used a node in the allocation\n"
    set exit_code 1
}

if {$exit_code == 0} {
    send_user "\nSUCCESS\n"
}
exit $exit_code
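############################################################################
# Usage note (added; assumes the standard Slurm expect test suite layout,
# where this script lives in testsuite/expect beside the "globals" file it
# sources and is named test15.19 after its test_id):
#
#   cd testsuite/expect
#   ./test15.19
#
# On success the script prints "SUCCESS" and exits 0; "WARNING: ..." means
# the test could not run in this configuration, and "FAILURE: ..." with a
# non-zero exit code indicates a problem that must be investigated.
############################################################################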