Compare commits

...

6 Commits

Author SHA1 Message Date
199407b2a1 spec: prerelease 0.5 for testing ppoll
Change-Id: I51deb1c1703a986ba0aa4e02da9f53009554dbb7
2020-07-01 08:49:08 +09:00
5973d66e2d Revert "epoll_wait(): make sure to schedule in offload"
This reverts commit 5e44c9c9f9.

Change-Id: I826336f1ece31a84072c3e62c6c6c68a641e8fb5
2020-06-30 17:11:26 +09:00
d7ef74659b Revert "epoll, ppoll: deschedule on offload, don't do it when exiting system call"
This reverts commit d4056acfc3.

Change-Id: I7df15b9d3957ca571f4b4e2d576799f8b97ae299
2020-06-30 17:11:23 +09:00
ac86affecc mcexec: fix FLIB_AFFINITY_ON_PROCESS mask for McKernel CPU numbers (Fugaku)
Change-Id: If42b139fb53866bcff0809d898d4a2a712946f0c
2020-06-30 16:29:03 +09:00
2026cf8dad mcexec: explicit CPU list in partitioned execution (for Fujitsu's FLIB_AFFINITY_ON_PROCESS)
Change-Id: I05c11f73553de8ccb5f79083ce2115ac57e62584
2020-06-30 16:29:00 +09:00
1d135492c3 mcexec: detect mismatch of mcexec -n and mpirun -ppn
Change-Id: I0c42e3119143da40ea2e69cd9ec99bde78a0ad2a
Refs: #929
2020-06-30 16:28:08 +09:00
15 changed files with 603 additions and 42 deletions


@@ -10,7 +10,7 @@ project(mckernel C ASM)
set(MCKERNEL_VERSION "1.7.0")
# See "Fedora Packaging Guidelines -- Versioning"
set(MCKERNEL_RELEASE "0.4")
set(MCKERNEL_RELEASE "0.5")
set(CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake/modules)
# for rpmbuild


@@ -142,4 +142,3 @@ SYSCALL_HANDLED(1045, signalfd)
SYSCALL_DELEGATED(1049, stat)
SYSCALL_DELEGATED(1060, getpgrp)
SYSCALL_HANDLED(1062, time)
SYSCALL_DELEGATED(1069, epoll_wait)


@@ -91,7 +91,10 @@ struct program_image_section {
struct get_cpu_set_arg {
int nr_processes;
char *req_cpu_list; // Requested by user-space
int req_cpu_list_len; // Length of request string
int *process_rank;
pid_t ppid;
void *cpu_set;
size_t cpu_set_size; // Size in bytes
int *target_core;

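The two new fields, req_cpu_list and req_cpu_list_len, carry an explicit CPU list from user space down to mcctrl. The real caller is the mcexec hunk further below; the following is only a rough standalone sketch of the calling convention, assuming a UAPI header name, using cpu_set_t as a stand-in for the mask type behind desc->cpu_set, and omitting the NUMA- and IKC-related fields of the real struct:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sched.h>
#include <sys/ioctl.h>

#include "mcctrl_ioctl.h"  /* assumed name; provides MCEXEC_UP_GET_CPUSET
                            * and struct get_cpu_set_arg */

/* fd is the already-opened mcctrl/mcexec device descriptor. */
int request_cpuset(int fd, int nr_processes, char *cpu_list)
{
	struct get_cpu_set_arg req;
	cpu_set_t cpu_set;
	int process_rank = -1, target_core = -1;

	memset(&req, 0, sizeof(req));
	req.nr_processes = nr_processes;   /* must match across all joiners */
	req.ppid = getppid();              /* identifies the node proxy */
	req.req_cpu_list = cpu_list;       /* e.g. "12,13,14", or NULL */
	req.req_cpu_list_len = cpu_list ? strlen(cpu_list) + 1 : 0;
	req.cpu_set = &cpu_set;
	req.cpu_set_size = sizeof(cpu_set);
	req.process_rank = &process_rank;
	req.target_core = &target_core;

	if (ioctl(fd, MCEXEC_UP_GET_CPUSET, &req) != 0) {
		perror("MCEXEC_UP_GET_CPUSET");
		return -1;
	}
	printf("rank %d, IKC target core %d\n", process_rank, target_core);
	return 0;
}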

@@ -587,13 +587,14 @@ extern int mckernel_cpu_2_linux_cpu(struct mcctrl_usrdata *udp, int cpu_id);
static long mcexec_get_cpuset(ihk_os_t os, unsigned long arg)
{
struct mcctrl_usrdata *udp = ihk_host_os_get_usrdata(os);
struct mcctrl_part_exec *pe;
struct mcctrl_part_exec *pe = NULL, *pe_itr;
struct get_cpu_set_arg req;
struct mcctrl_cpu_topology *cpu_top, *cpu_top_i;
struct cache_topology *cache_top;
int cpu, cpus_assigned, cpus_to_assign, cpu_prev;
int ret = 0;
int mcexec_linux_numa;
int pe_list_len = 0;
cpumask_t *mcexec_cpu_set = NULL;
cpumask_t *cpus_used = NULL;
cpumask_t *cpus_to_use = NULL;
@@ -613,24 +614,126 @@ static long mcexec_get_cpuset(ihk_os_t os, unsigned long arg)
return -EINVAL;
}
pe = &udp->part_exec;
mutex_lock(&pe->lock);
if (copy_from_user(&req, (void *)arg, sizeof(req))) {
printk("%s: error copying user request\n", __FUNCTION__);
pr_err("%s: error copying user request\n", __func__);
ret = -EINVAL;
goto put_and_unlock_out;
goto put_out;
}
/* First process to enter CPU partitioning */
if (pe->nr_processes == -1) {
/* User requested CPU mask? */
if (req.req_cpu_list && req.req_cpu_list_len) {
char *cpu_list = NULL;
cpu_list = kmalloc(req.req_cpu_list_len, GFP_KERNEL);
if (!cpu_list) {
printk("%s: error: allocating CPU list\n", __FUNCTION__);
ret = -ENOMEM;
goto put_out;
}
if (copy_from_user(cpu_list,
req.req_cpu_list, req.req_cpu_list_len)) {
printk("%s: error copying CPU list request\n", __FUNCTION__);
kfree(cpu_list);
ret = -EINVAL;
goto put_out;
}
cpus_used = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
cpus_to_use = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
if (!cpus_to_use || !cpus_used) {
printk("%s: error: allocating CPU mask\n", __FUNCTION__);
ret = -ENOMEM;
kfree(cpu_list);
goto put_out;
}
memset(cpus_used, 0, sizeof(cpumask_t));
memset(cpus_to_use, 0, sizeof(cpumask_t));
/* Parse CPU list */
if (cpulist_parse(cpu_list, cpus_to_use) < 0) {
printk("%s: invalid CPUs requested: %s\n",
__FUNCTION__, cpu_list);
ret = -EINVAL;
kfree(cpu_list);
goto put_out;
}
memcpy(cpus_used, cpus_to_use, sizeof(cpumask_t));
/* Copy mask to user-space */
if (copy_to_user(req.cpu_set, cpus_used,
(req.cpu_set_size < sizeof(cpumask_t) ?
req.cpu_set_size : sizeof(cpumask_t)))) {
printk("%s: error copying mask to user\n", __FUNCTION__);
ret = -EINVAL;
kfree(cpu_list);
goto put_out;
}
/* Copy IKC target core */
cpu = cpumask_next(-1, cpus_used);
if (copy_to_user(req.target_core, &cpu, sizeof(cpu))) {
printk("%s: error copying target core to user\n",
__FUNCTION__);
ret = -EINVAL;
kfree(cpu_list);
goto put_out;
}
/* Save in per-process structure */
memcpy(&ppd->cpu_set, cpus_used, sizeof(cpumask_t));
ppd->ikc_target_cpu = cpu;
printk("%s: %s -> target McKernel CPU: %d\n",
__func__, cpu_list, cpu);
ret = 0;
kfree(cpu_list);
goto put_out;
}
mutex_lock(&udp->part_exec_lock);
/* Find part_exec having same node_proxy */
list_for_each_entry_reverse(pe_itr, &udp->part_exec_list, chain) {
pe_list_len++;
if (pe_itr->node_proxy_pid == req.ppid) {
pe = pe_itr;
break;
}
}
if (!pe) {
/* First process to enter CPU partitioning */
pr_debug("%s: pe_list_len:%d\n", __func__, pe_list_len);
if (pe_list_len >= PE_LIST_MAXLEN) {
/* delete head entry of pe_list */
pe_itr = list_first_entry(&udp->part_exec_list,
struct mcctrl_part_exec, chain);
list_del(&pe_itr->chain);
kfree(pe_itr);
}
pe = kzalloc(sizeof(struct mcctrl_part_exec), GFP_KERNEL);
if (!pe) {
mutex_unlock(&udp->part_exec_lock);
ret = -ENOMEM;
goto put_out;
}
/* Init part_exec */
mutex_init(&pe->lock);
INIT_LIST_HEAD(&pe->pli_list);
pe->nr_processes = req.nr_processes;
pe->nr_processes_left = req.nr_processes;
pe->nr_processes_joined = 0;
pe->node_proxy_pid = req.ppid;
list_add_tail(&pe->chain, &udp->part_exec_list);
dprintk("%s: nr_processes: %d (partitioned exec starts)\n",
__FUNCTION__,
pe->nr_processes);
__func__, pe->nr_processes);
}
mutex_unlock(&udp->part_exec_lock);
mutex_lock(&pe->lock);
if (pe->nr_processes != req.nr_processes) {
printk("%s: error: requested number of processes"
@@ -640,7 +743,15 @@ static long mcexec_get_cpuset(ihk_os_t os, unsigned long arg)
goto put_and_unlock_out;
}
if (pe->nr_processes_joined >= pe->nr_processes) {
printk("%s: too many processes have joined to the group of %d\n",
__func__, req.ppid);
ret = -EINVAL;
goto put_and_unlock_out;
}
--pe->nr_processes_left;
++pe->nr_processes_joined;
dprintk("%s: nr_processes: %d, nr_processes_left: %d\n",
__FUNCTION__,
pe->nr_processes,
@@ -726,8 +837,6 @@ static long mcexec_get_cpuset(ihk_os_t os, unsigned long arg)
wake_up_interruptible(&pli_next->pli_wq);
}
/* Reset process counter to start state */
pe->nr_processes = -1;
ret = -ETIMEDOUT;
goto put_and_unlock_out;
}
@@ -975,16 +1084,8 @@ next_cpu:
/* Commit used cores to OS structure */
memcpy(&pe->cpus_used, cpus_used, sizeof(*cpus_used));
/* Reset if last process */
if (pe->nr_processes_left == 0) {
dprintk("%s: nr_processes: %d (partitioned exec ends)\n",
__FUNCTION__,
pe->nr_processes);
pe->nr_processes = -1;
memset(&pe->cpus_used, 0, sizeof(pe->cpus_used));
}
/* Otherwise wake up next process in list */
else {
/* If not last process, wake up next process in list */
if (pe->nr_processes_left != 0) {
++pe->process_rank;
pli_next = list_first_entry(&pe->pli_list,
struct process_list_item, list);
@@ -997,11 +1098,14 @@ next_cpu:
ret = 0;
put_and_unlock_out:
mutex_unlock(&pe->lock);
put_out:
mcctrl_put_per_proc_data(ppd);
kfree(cpus_to_use);
kfree(cpus_used);
kfree(mcexec_cpu_set);
mcctrl_put_per_proc_data(ppd);
mutex_unlock(&pe->lock);
return ret;
}

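Two things changed in this handler. It now keys partitions on req.ppid, the pid of the node proxy that forked the mcexec instances (mcexec passes getppid(), typically the per-node mpirun proxy), so concurrent jobs on one node get separate mcctrl_part_exec instances; and it keeps at most PE_LIST_MAXLEN of them, evicting the oldest when a new partition would exceed that. Below is the same lookup-or-create-with-eviction pattern reduced to plain C, with hypothetical names and without the kernel's list_head and locking plumbing:

#include <stdlib.h>
#include <sys/types.h>

#define PE_LIST_MAXLEN 5	/* as in the mcctrl.h hunk below */

/* Simplified stand-in for struct mcctrl_part_exec. */
struct part {
	pid_t node_proxy_pid;	/* proxy pid that keys the partition */
	int nr_processes;
	struct part *next;
};

static struct part *head, *tail;	/* head = oldest, tail = newest */
static int list_len;

struct part *lookup_or_create(pid_t proxy_pid, int nr_processes)
{
	struct part *p;

	/* Reuse the partition created by the same node proxy, if any. */
	for (p = head; p; p = p->next)
		if (p->node_proxy_pid == proxy_pid)
			return p;

	/* List full: drop the oldest partition, as the handler does. */
	if (list_len >= PE_LIST_MAXLEN) {
		p = head;
		head = p->next;
		if (!head)
			tail = NULL;
		free(p);
		--list_len;
	}

	if (!(p = calloc(1, sizeof(*p))))
		return NULL;
	p->node_proxy_pid = proxy_pid;
	p->nr_processes = nr_processes;
	if (tail)
		tail->next = p;
	else
		head = p;
	tail = p;
	++list_len;
	return p;
}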

@@ -513,6 +513,7 @@ int prepare_ikc_channels(ihk_os_t os)
init_waitqueue_head(&usrdata->wq_procfs);
mutex_init(&usrdata->reserve_lock);
mutex_init(&usrdata->part_exec_lock);
for (i = 0; i < MCCTRL_PER_PROC_DATA_HASH_SIZE; ++i) {
INIT_LIST_HEAD(&usrdata->per_proc_data_hash[i]);
@@ -521,10 +522,8 @@ int prepare_ikc_channels(ihk_os_t os)
INIT_LIST_HEAD(&usrdata->cpu_topology_list);
INIT_LIST_HEAD(&usrdata->node_topology_list);
INIT_LIST_HEAD(&usrdata->part_exec_list);
mutex_init(&usrdata->part_exec.lock);
INIT_LIST_HEAD(&usrdata->part_exec.pli_list);
usrdata->part_exec.nr_processes = -1;
INIT_LIST_HEAD(&usrdata->wakeup_descs_list);
spin_lock_init(&usrdata->wakeup_descs_lock);
@@ -580,6 +579,18 @@ void destroy_ikc_channels(ihk_os_t os)
kfree(usrdata->channels);
kfree(usrdata->ikc2linux);
mutex_lock(&usrdata->part_exec_lock);
while (!list_empty(&usrdata->part_exec_list)) {
struct mcctrl_part_exec *pe;
pe = list_first_entry(&usrdata->part_exec_list,
struct mcctrl_part_exec, chain);
list_del(&pe->chain);
kfree(pe);
}
mutex_unlock(&usrdata->part_exec_lock);
kfree(usrdata);
}


@@ -324,13 +324,20 @@ struct process_list_item {
wait_queue_head_t pli_wq;
};
#define PE_LIST_MAXLEN 5
struct mcctrl_part_exec {
struct mutex lock;
int nr_processes;
/* number of processes still to let in / out of the synchronization point */
int nr_processes_left;
/* number of processes which have joined the partition */
int nr_processes_joined;
int process_rank;
pid_t node_proxy_pid;
cpumask_t cpus_used;
struct list_head pli_list;
struct list_head chain;
};
#define CPU_LONGS (((NR_CPUS) + (BITS_PER_LONG) - 1) / (BITS_PER_LONG))
@@ -353,6 +360,7 @@ struct mcctrl_usrdata {
int job_pos;
int mcctrl_dma_abort;
struct mutex reserve_lock;
struct mutex part_exec_lock;
unsigned long last_thread_exec;
wait_queue_head_t wq_procfs;
struct list_head per_proc_data_hash[MCCTRL_PER_PROC_DATA_HASH_SIZE];
@@ -368,7 +376,7 @@ struct mcctrl_usrdata {
nodemask_t numa_online;
struct list_head cpu_topology_list;
struct list_head node_topology_list;
struct mcctrl_part_exec part_exec;
struct list_head part_exec_list;
int perf_event_num;
};

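The two counters added next to nr_processes are what make a -n / -ppn mismatch detectable: every joining mcexec decrements nr_processes_left and increments nr_processes_joined, a join beyond nr_processes is refused, and missing joiners leave nr_processes_left above zero until the timeout fires. A simplified mirror of those checks (the function name is made up; locking, rank assignment, and the timeout are omitted):

#include <errno.h>

struct part_exec_counters {
	int nr_processes;		/* the -n value announced by the first joiner */
	int nr_processes_left;		/* joiners still expected */
	int nr_processes_joined;	/* joiners seen so far */
};

int join_partition(struct part_exec_counters *pe, int req_nr_processes)
{
	if (pe->nr_processes != req_nr_processes)
		return -EINVAL;	/* two mcexec instances disagree about -n */
	if (pe->nr_processes_joined >= pe->nr_processes)
		return -EINVAL;	/* more joiners than -n, i.e. -ppn > -n */
	--pe->nr_processes_left;
	++pe->nr_processes_joined;
	return 0;	/* the last joiner leaves nr_processes_left == 0 */
}

With -ppn 6 and -n 3, joiners four to six fail the second check, which is where the three "Invalid argument" lines in the C929T04 logs below come from; with -ppn 3 and -n 5, nr_processes_left never reaches zero and all three processes eventually report "Connection timed out" (C929T03).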

@@ -2211,6 +2211,64 @@ int main(int argc, char **argv)
pthread_spin_init(&overlay_fd_lock, 0);
/* XXX: Fugaku: Fujitsu process placement fix */
if (getenv("FLIB_AFFINITY_ON_PROCESS")) {
char *cpu_s;
int flib_size;
char *flib_aff_orig, *flib_aff;
int cpu, off = 0;
flib_aff_orig = strdup(getenv("FLIB_AFFINITY_ON_PROCESS"));
if (!flib_aff_orig) {
fprintf(stderr, "error: dupping FLIB_AFFINITY_ON_PROCESS\n");
exit(EXIT_FAILURE);
}
flib_size = strlen(flib_aff_orig) * 2;
flib_aff = malloc(flib_size);
if (!flib_aff) {
fprintf(stderr, "error: allocating memory for "
"FLIB_AFFINITY_ON_PROCESS\n");
exit(EXIT_FAILURE);
}
memset(flib_aff, 0, flib_size);
cpu_s = strtok(flib_aff_orig, ",");
while (cpu_s) {
int ret;
/* "Shift" left by 12 CPUs */
cpu = atoi(cpu_s) - 12;
/* Prepend "," */
if (off > 0) {
ret = snprintf(flib_aff + off, flib_size - off, "%s", ",");
if (ret < 0) {
fprintf(stderr, "error: constructing "
"FLIB_AFFINITY_ON_PROCESS\n");
exit(EXIT_FAILURE);
}
off += ret;
}
ret = snprintf(flib_aff + off, flib_size - off, "%d", cpu);
if (ret < 0) {
fprintf(stderr, "error: constructing "
"FLIB_AFFINITY_ON_PROCESS\n");
exit(EXIT_FAILURE);
}
off += ret;
cpu_s = strtok(NULL, ",");
}
__dprintf("FLIB_AFFINITY_ON_PROCESS: %s -> %s\n",
getenv("FLIB_AFFINITY_ON_PROCESS"), flib_aff);
setenv("FLIB_AFFINITY_ON_PROCESS", flib_aff, 1);
}
ld_preload_init();
#ifdef ADD_ENVS_OPTION
@@ -2483,9 +2541,12 @@ int main(int argc, char **argv)
CPU_ZERO(&mcexec_cpu_set);
cpu_set_arg.req_cpu_list = NULL;
cpu_set_arg.req_cpu_list_len = 0;
cpu_set_arg.cpu_set = (void *)&desc->cpu_set;
cpu_set_arg.cpu_set_size = sizeof(desc->cpu_set);
cpu_set_arg.nr_processes = nr_processes;
cpu_set_arg.ppid = getppid();
cpu_set_arg.target_core = &target_core;
cpu_set_arg.process_rank = &process_rank;
cpu_set_arg.mcexec_linux_numa = &mcexec_linux_numa;
@@ -2493,6 +2554,16 @@
cpu_set_arg.mcexec_cpu_set_size = sizeof(mcexec_cpu_set);
cpu_set_arg.ikc_mapped = &ikc_mapped;
/* Fugaku specific: Fujitsu CPU binding */
if (getenv("FLIB_AFFINITY_ON_PROCESS")) {
cpu_set_arg.req_cpu_list =
getenv("FLIB_AFFINITY_ON_PROCESS");
cpu_set_arg.req_cpu_list_len =
strlen(cpu_set_arg.req_cpu_list) + 1;
__dprintf("%s: requesting CPUs: %s\n",
__func__, cpu_set_arg.req_cpu_list);
}
if (ioctl(fd, MCEXEC_UP_GET_CPUSET, (void *)&cpu_set_arg) != 0) {
perror("getting CPU set for partitioned execution");
close(fd);
@@ -2501,6 +2572,12 @@
desc->cpu = target_core;
desc->process_rank = process_rank;
/* Fugaku specific: Fujitsu node-local rank */
if (getenv("PLE_RANK_ON_NODE")) {
desc->process_rank = atoi(getenv("PLE_RANK_ON_NODE"));
__dprintf("%s: rank: %d, target CPU: %d\n",
__func__, desc->process_rank, desc->cpu);
}
/* Bind to CPU cores where the LWK process' IKC target maps to */
if (ikc_mapped && !no_bind_ikc_map) {

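The FLIB_AFFINITY_ON_PROCESS block added to main() above rewrites the comma-separated CPU list in the environment variable by subtracting 12 from each entry, translating the Linux CPU numbers computed by Fujitsu's runtime into McKernel CPU numbers (the fixed offset of 12 presumably reflects how many logical CPUs stay with Linux in the Fugaku configuration). A standalone version of that rewrite with a hypothetical helper name; like the strtok() loop above, it handles only plain comma-separated numbers, no ranges, and it adds a truncation check the original omits:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *shift_cpu_list(const char *orig, int offset)
{
	char *in = strdup(orig);
	size_t out_size = strlen(orig) * 2 + 1;	/* sized as in the diff, plus NUL */
	char *out = calloc(1, out_size);
	int off = 0;
	char *tok;

	if (!in || !out)
		goto fail;

	for (tok = strtok(in, ","); tok; tok = strtok(NULL, ",")) {
		/* Prepend "," for every entry but the first, then the shifted CPU. */
		int ret = snprintf(out + off, out_size - off, "%s%d",
				   off > 0 ? "," : "", atoi(tok) - offset);
		if (ret < 0 || ret >= (int)(out_size - off))
			goto fail;
		off += ret;
	}
	free(in);
	return out;

fail:
	free(in);
	free(out);
	return NULL;
}

int main(void)
{
	char *shifted = shift_cpu_list("12,13,14", 12);	/* -> "0,1,2" */

	printf("%s\n", shifted ? shifted : "(error)");
	free(shifted);
	return 0;
}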

@@ -242,11 +242,6 @@ long do_syscall(struct syscall_request *req, int cpu)
unsigned long flags;
DECLARE_WAITQ_ENTRY(scd_wq_entry, cpu_local_var(current));
if (req->number == __NR_epoll_wait ||
req->number == __NR_epoll_pwait ||
req->number == __NR_ppoll)
goto schedule;
cpu_pause();
/* Spin if not preemptable */
@@ -275,7 +270,6 @@ long do_syscall(struct syscall_request *req, int cpu)
continue;
}
schedule:
flags = cpu_disable_interrupt_save();
/* Try to sleep until notified */
@@ -10320,11 +10314,7 @@ long syscall(int num, ihk_mc_user_context_t *ctx)
}
#endif // PROFILE_ENABLE
/* Do not deschedule when returning from an event (e.g., MPI) */
if (!(num == __NR_epoll_wait ||
num == __NR_epoll_pwait ||
num == __NR_ppoll) &&
smp_load_acquire(&v->flags) & CPU_FLAG_NEED_RESCHED) {
if (smp_load_acquire(&v->flags) & CPU_FLAG_NEED_RESCHED) {
check_need_resched();
}

test/issues/929/C929.sh Executable file, 145 lines

@@ -0,0 +1,145 @@
#!/bin/bash
USELTP=0
USEOSTEST=0
. ../../common.sh
issue="929"
tid=01
tname=`printf "C${issue}T%02d" ${tid}`
echo "*** ${tname} start *******************************"
TEST_CMD="mpirun -f ./hostfile -ppn 5 ${MCEXEC} -n 5 ./test_prog.sh"
echo ${TEST_CMD}
${TEST_CMD} &> ${tname}.txt
mpi_ret=$?
cat ./${tname}.txt
started_num=`grep 'test_prog is started' ./${tname}.txt | wc -l`
if [ ${mpi_ret} -eq 0 -a ${started_num} -eq 5 ]; then
echo "*** ${tname} PASSED ******************************"
else
echo "*** ${tname} FAILED ******************************"
fi
let tid++
echo ""
tname=`printf "C${issue}T%02d" ${tid}`
echo "*** ${tname} start *******************************"
TEST_CMD="mpirun -f ./hostfile -ppn 5 ${MCEXEC} -n 3 ./test_prog.sh"
echo ${TEST_CMD}
${TEST_CMD} &> ${tname}.txt
mpi_ret=$?
cat ./${tname}.txt
started_num=`grep 'test_prog is started' ./${tname}.txt | wc -l`
if [ ${mpi_ret} -ne 0 -a ${started_num} -eq 3 ]; then
echo "*** ${tname} PASSED ******************************"
else
echo "*** ${tname} FAILED ******************************"
fi
let tid++
echo ""
tname=`printf "C${issue}T%02d" ${tid}`
echo "*** ${tname} start *******************************"
TEST_CMD="mpirun -f ./hostfile -ppn 3 ${MCEXEC} -n 5 ./test_prog.sh"
echo ${TEST_CMD}
${TEST_CMD} &> ${tname}.txt
mpi_ret=$?
cat ./${tname}.txt
started_num=`grep 'test_prog is started' ./${tname}.txt | wc -l`
if [ ${mpi_ret} -ne 0 -a ${started_num} -eq 0 ]; then
echo "*** ${tname} PASSED ******************************"
else
echo "*** ${tname} FAILED ******************************"
fi
let tid++
echo ""
tname=`printf "C${issue}T%02d" ${tid}`
echo "*** ${tname} start *******************************"
TEST_CMD="mpirun -f ./hostfile -ppn 6 ${MCEXEC} -n 3 ./test_prog.sh"
echo ${TEST_CMD}
${TEST_CMD} &> ${tname}.txt
mpi_ret=$?
cat ./${tname}.txt
started_num=`grep 'test_prog is started' ./${tname}.txt | wc -l`
if [ ${mpi_ret} -ne 0 -a ${started_num} -eq 3 ]; then
echo "*** ${tname} PASSED ******************************"
else
echo "*** ${tname} FAILED ******************************"
fi
let tid++
echo ""
tname=`printf "C${issue}T%02d" ${tid}`
echo "*** ${tname} start *******************************"
TEST_CMD="mpirun -f ./hostfile -ppn 250 ${MCEXEC} -n 250 ./test_prog.sh"
echo ${TEST_CMD}
${TEST_CMD} &> ${tname}.txt
mpi_ret=$?
head -n 10 ./${tname}.txt
echo "..."
started_num=`grep 'test_prog is started' ./${tname}.txt | wc -l`
if [ ${mpi_ret} -ne 0 -a ${started_num} -eq 0 ]; then
echo "*** ${tname} PASSED ******************************"
else
echo "*** ${tname} FAILED ******************************"
fi
let tid++
echo ""
tname=`printf "C${issue}T%02d" ${tid}`
echo "*** ${tname} start *******************************"
ng=0
TEST_CMD="mpirun -f ./hostfile -ppn 5 ${MCEXEC} -n 5 ./test_prog.sh"
echo "** reboot mcrernel for check pe_list_len"
mcreboot
echo "** enable debug message in mcexec_get_cpuset"
sudo sh -c "echo -n 'func mcexec_get_cpuset +p' > /sys/kernel/debug/dynamic_debug/control"
echo ${TEST_CMD}
for i in `seq 1 20`
do
${TEST_CMD} &> ${tname}.txt
mpi_ret=$?
started_num=`grep 'test_prog is started' ./${tname}.txt | wc -l`
if [ ${mpi_ret} -eq 0 -a ${started_num} -eq 5 ]; then
echo "[OK] exec: $i"
else
echo "[NG] exec: $i"
let ng++
fi
done
echo "** check pe_list_len"
dmesg --notime | grep "mcexec_get_cpuset: pe_list" | tail -n 20 | cut -f 2-3 -d ':' > ./pe_list_len.txt
# read the file directly: a pipeline would run the loop in a subshell,
# so the "let ng++" below would not update this shell's counter
while read line
do
len=`echo ${line} | cut -f 2 -d ':'`
if [ ${len} -ge 0 -a ${len} -le 5 ]; then
echo "[OK] ${line}"
else
echo "[NG] ${line}"
let ng++
fi
done < ./pe_list_len.txt
echo "** disable debug message in mcexec_get_cpuset"
sudo sh -c "echo -n 'func mcexec_get_cpuset -p' > /sys/kernel/debug/dynamic_debug/control"
if [ ${ng} -eq 0 ]; then
echo "*** ${tname} PASSED ******************************"
else
echo "*** ${tname} FAILED ******************************"
fi
let tid++
echo ""

test/issues/929/Makefile Normal file, 11 lines

@@ -0,0 +1,11 @@
CFLAGS=-g
LDFLAGS=
TARGET=
all: $(TARGET)
test: all
./C929.sh
clean:
rm -f $(TARGET) *.o *.txt

test/issues/929/README Normal file, 36 lines

@@ -0,0 +1,36 @@
【Issue #929 verification】
□ Test description
1. Confirm that the behavior is as expected for each combination of the
-ppn value given to mpirun and the -n value given to mcexec.
C929T01:
When -ppn == -n, the program runs and mpirun succeeds.
C929T02:
When -ppn > -n, part of the program runs and mpirun fails.
C929T03:
When -ppn < -n, the program does not run and mpirun fails.
C929T04:
When -ppn is an integer multiple of -n, part of the program runs and mpirun fails.
C929T05:
When -ppn and -n are larger than the number of CPUs assigned to McKernel,
the program does not run and mpirun fails.
C929T06:
When a run with -ppn == -n is repeated 20 times in a row, the program runs
and mpirun succeeds every time; in addition, the number of entries in the
part_exec_list managed by mcctrl does not exceed 5.
□ How to run
$ make test
The McKernel installation path and the locations of OSTEST and LTP are read
from $HOME/.mck_test_config.
Create .mck_test_config by copying the mck_test_config.sample file generated
when McKernel is built to $HOME and editing it as needed.
□ Results
See x86_64_result.log and aarch64_result.log.
All items were confirmed to PASS.


@@ -0,0 +1,99 @@
*** C929T01 start *******************************
mpirun -f ./hostfile -ppn 5 /home/satoken/ihk+mckernel/bin/mcexec -n 5 ./test_prog.sh
test_prog is started.
test_prog is started.
test_prog is started.
test_prog is started.
test_prog is started.
*** C929T01 PASSED ******************************
*** C929T02 start *******************************
mpirun -f ./hostfile -ppn 5 /home/satoken/ihk+mckernel/bin/mcexec -n 3 ./test_prog.sh
getting CPU set for partitioned execution: Invalid argument
getting CPU set for partitioned execution: Invalid argument
test_prog is started.
test_prog is started.
test_prog is started.
*** C929T02 PASSED ******************************
*** C929T03 start *******************************
mpirun -f ./hostfile -ppn 3 /home/satoken/ihk+mckernel/bin/mcexec -n 5 ./test_prog.sh
getting CPU set for partitioned execution: Connection timed out
getting CPU set for partitioned execution: Connection timed out
getting CPU set for partitioned execution: Connection timed out
*** C929T03 PASSED ******************************
*** C929T04 start *******************************
mpirun -f ./hostfile -ppn 6 /home/satoken/ihk+mckernel/bin/mcexec -n 3 ./test_prog.sh
getting CPU set for partitioned execution: Invalid argument
getting CPU set for partitioned execution: Invalid argument
getting CPU set for partitioned execution: Invalid argument
test_prog is started.
test_prog is started.
test_prog is started.
*** C929T04 PASSED ******************************
*** C929T05 start *******************************
mpirun -f ./hostfile -ppn 250 /home/satoken/ihk+mckernel/bin/mcexec -n 250 ./test_prog.sh
error: nr_processes can't exceed nr. of CPUs
error: nr_processes can't exceed nr. of CPUs
error: nr_processes can't exceed nr. of CPUs
error: nr_processes can't exceed nr. of CPUs
error: nr_processes can't exceed nr. of CPUs
error: nr_processes can't exceed nr. of CPUs
error: nr_processes can't exceed nr. of CPUs
error: nr_processes can't exceed nr. of CPUs
error: nr_processes can't exceed nr. of CPUs
error: nr_processes can't exceed nr. of CPUs
...
*** C929T05 PASSED ******************************
*** C929T06 start *******************************
** reboot mckernel to check pe_list_len
mcreboot.sh -c 1-6,29-34 -m 50G@0,50G@1 -r 1-6:0+29-34:28 -O ... done
** enable debug message in mcexec_get_cpuset
mpirun -f ./hostfile -ppn 5 /home/satoken/ihk+mckernel/bin/mcexec -n 5 ./test_prog.sh
[OK] exec: 1
[OK] exec: 2
[OK] exec: 3
[OK] exec: 4
[OK] exec: 5
[OK] exec: 6
[OK] exec: 7
[OK] exec: 8
[OK] exec: 9
[OK] exec: 10
[OK] exec: 11
[OK] exec: 12
[OK] exec: 13
[OK] exec: 14
[OK] exec: 15
[OK] exec: 16
[OK] exec: 17
[OK] exec: 18
[OK] exec: 19
[OK] exec: 20
** check pe_list_len
[OK] pe_list_len:0
[OK] pe_list_len:1
[OK] pe_list_len:2
[OK] pe_list_len:3
[OK] pe_list_len:4
[OK] pe_list_len:5
[OK] pe_list_len:5
[OK] pe_list_len:5
[OK] pe_list_len:5
[OK] pe_list_len:5
[OK] pe_list_len:5
[OK] pe_list_len:5
[OK] pe_list_len:5
[OK] pe_list_len:5
[OK] pe_list_len:5
[OK] pe_list_len:5
[OK] pe_list_len:5
[OK] pe_list_len:5
[OK] pe_list_len:5
[OK] pe_list_len:5
** disable debug message in mcexec_get_cpuset
*** C929T06 PASSED ******************************

test/issues/929/hostfile Normal file, 1 line

@@ -0,0 +1 @@
localhost

test/issues/929/test_prog.sh Executable file, 3 lines

@@ -0,0 +1,3 @@
#!/bin/sh
echo "test_prog is started."


@@ -0,0 +1,74 @@
*** C929T01 start *******************************
mpirun -f ./hostfile -ppn 5 /home/satoken/ihk+mckernel/bin/mcexec -n 5 ./test_prog.sh
test_prog is started.
test_prog is started.
test_prog is started.
test_prog is started.
test_prog is started.
*** C929T01 PASSED ******************************
*** C929T02 start *******************************
mpirun -f ./hostfile -ppn 5 /home/satoken/ihk+mckernel/bin/mcexec -n 3 ./test_prog.sh
getting CPU set for partitioned execution: Invalid argument
getting CPU set for partitioned execution: Invalid argument
test_prog is started.
test_prog is started.
test_prog is started.
*** C929T02 PASSED ******************************
*** C929T03 start *******************************
mpirun -f ./hostfile -ppn 3 /home/satoken/ihk+mckernel/bin/mcexec -n 5 ./test_prog.sh
getting CPU set for partitioned execution: Connection timed out
getting CPU set for partitioned execution: Connection timed out
getting CPU set for partitioned execution: Connection timed out
*** C929T03 PASSED ******************************
*** C929T04 start *******************************
mpirun -f ./hostfile -ppn 6 /home/satoken/ihk+mckernel/bin/mcexec -n 3 ./test_prog.sh
getting CPU set for partitioned execution: Invalid argument
getting CPU set for partitioned execution: Invalid argument
getting CPU set for partitioned execution: Invalid argument
test_prog is started.
test_prog is started.
test_prog is started.
*** C929T04 PASSED ******************************
*** C929T05 start *******************************
mpirun -f ./hostfile -ppn 250 /home/satoken/ihk+mckernel/bin/mcexec -n 250 ./test_prog.sh
error: nr_processes can't exceed nr. of CPUs
error: nr_processes can't exceed nr. of CPUs
error: nr_processes can't exceed nr. of CPUs
error: nr_processes can't exceed nr. of CPUs
error: nr_processes can't exceed nr. of CPUs
error: nr_processes can't exceed nr. of CPUs
error: nr_processes can't exceed nr. of CPUs
error: nr_processes can't exceed nr. of CPUs
error: nr_processes can't exceed nr. of CPUs
error: nr_processes can't exceed nr. of CPUs
...
*** C929T05 PASSED ******************************
*** C929T06 start *******************************
mpirun -f ./hostfile -ppn 5 /home/satoken/ihk+mckernel/bin/mcexec -n 5 ./test_prog.sh
[OK] exec: 1
[OK] exec: 2
[OK] exec: 3
[OK] exec: 4
[OK] exec: 5
[OK] exec: 6
[OK] exec: 7
[OK] exec: 8
[OK] exec: 9
[OK] exec: 10
[OK] exec: 11
[OK] exec: 12
[OK] exec: 13
[OK] exec: 14
[OK] exec: 15
[OK] exec: 16
[OK] exec: 17
[OK] exec: 18
[OK] exec: 19
[OK] exec: 20
*** C929T06 PASSED ******************************