Memory ordering fixes and use of the architecture-provided cmpxchg() instead of compiler atomic intrinsics

Change-Id: I4dadebc32721744dad982f3fc5b3eea7ab7ca745
Author:     Balazs Gerofi
Date:       2019-08-13 15:17:07 +09:00
Committer:  Masamichi Takagi
Parent:     7aa2d64294
Commit:     c3c57940ba

4 changed files with 30 additions and 26 deletions
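
Note: the recurring mechanical change in this diff is a return-value convention switch. The GCC builtin __sync_bool_compare_and_swap() returns true when the swap happened, whereas the kernel-style cmpxchg() returns the value found in memory before the operation, so success is tested by comparing the return value against the expected old value. A minimal userspace sketch of the equivalence, using the value-returning builtin __sync_val_compare_and_swap() as a stand-in for cmpxchg() (illustrative only, not code from this repository):

    #include <assert.h>

    /* Stand-in for the kernel's cmpxchg(): returns the prior value. */
    #define cmpxchg_like(ptr, old, new) \
            __sync_val_compare_and_swap((ptr), (old), (new))

    int main(void)
    {
            unsigned long status = 0;

            /* Boolean CAS: a nonzero return means the swap took place. */
            int swapped = __sync_bool_compare_and_swap(&status, 0, 1);
            assert(swapped && status == 1);

            /* Value CAS: the swap took place iff the return value equals
             * the expected old value, the pattern used in every hunk below. */
            unsigned long prev = cmpxchg_like(&status, 1, 2);
            assert(prev == 1 && status == 2);       /* succeeded */

            prev = cmpxchg_like(&status, 1, 3);
            assert(prev == 2 && status == 2);       /* failed; returns the current value */
            return 0;
    }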


@@ -1452,7 +1452,7 @@ retry_alloc:
 			__FUNCTION__, task_pid_vnr(current), packet->ref);
 	mb();
-	if (!packet->req.valid) {
+	if (!smp_load_acquire(&packet->req.valid)) {
 		printk("%s: ERROR: stray wakeup pid: %d, tid: %d: SC %lu\n",
 			__FUNCTION__,
 			task_tgid_vnr(current),
@@ -1462,7 +1462,7 @@ retry_alloc:
 		goto retry;
 	}
-	packet->req.valid = 0; /* ack */
+	smp_store_release(&packet->req.valid, 0); /* ack */
 	dprintk("%s: system call: %d, args[0]: %lu, args[1]: %lu, args[2]: %lu, "
 		"args[3]: %lu, args[4]: %lu, args[5]: %lu\n",
 		__FUNCTION__,
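
Note: the two hunks above are the consumer half of a publish/consume handshake on packet->req.valid: the request payload may only be read after valid is observed set, and clearing it hands the slot back to the writer. The writer half is the send_syscall() hunk in the last file, which raises the flag with smp_store_release() after copying the request. A minimal C11 userspace analogue of the smp_store_release()/smp_load_acquire() pairing, with hypothetical names and a single writer and reader assumed (a sketch, not the project's code):

    #include <stdatomic.h>
    #include <string.h>

    struct request_slot {
            long args[6];           /* payload */
            atomic_int valid;       /* 0 = free, 1 = request published */
    };

    /* Writer (~ send_syscall): the release store makes the payload writes
     * visible to any reader that observes valid == 1 with an acquire load. */
    static void publish(struct request_slot *s, const long args[6])
    {
            memcpy(s->args, args, sizeof(s->args));
            atomic_store_explicit(&s->valid, 1, memory_order_release);
    }

    /* Reader (~ the hunk above): returns 1 and copies the payload if a
     * request was pending; clearing valid is the "ack". */
    static int consume(struct request_slot *s, long out[6])
    {
            if (!atomic_load_explicit(&s->valid, memory_order_acquire))
                    return 0;
            memcpy(out, s->args, sizeof(s->args));
            atomic_store_explicit(&s->valid, 0, memory_order_release);
            return 1;
    }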


@@ -228,18 +228,19 @@ static int __notify_syscall_requester(ihk_os_t os, struct ikc_scd_packet *packet
 	c = (usrdata->channels + packet->ref)->c;
 	/* If spinning, no need for IKC message */
-	if (__sync_bool_compare_and_swap(&res->req_thread_status,
+	if (cmpxchg(&res->req_thread_status,
 			IHK_SCD_REQ_THREAD_SPINNING,
-			IHK_SCD_REQ_THREAD_TO_BE_WOKEN)) {
+			IHK_SCD_REQ_THREAD_TO_BE_WOKEN) ==
+			IHK_SCD_REQ_THREAD_SPINNING) {
 		dprintk("%s: no need to send IKC message for PID %d\n",
-			__FUNCTION__, packet->pid);
+				__FUNCTION__, packet->pid);
 		return ret;
 	}
 	/* Wait until the status goes back to IHK_SCD_REQ_THREAD_SPINNING or
 	   IHK_SCD_REQ_THREAD_DESCHEDULED because two wake-up attempts are competing.
 	   Note that mcexec_terminate_thread() and returning EINTR would compete. */
-	if (res->req_thread_status == IHK_SCD_REQ_THREAD_TO_BE_WOKEN) {
+	if (smp_load_acquire(&res->req_thread_status) == IHK_SCD_REQ_THREAD_TO_BE_WOKEN) {
 		printk("%s: INFO: someone else is waking up the McKernel thread, "
 			"pid: %d, req status: %lu, syscall nr: %lu\n",
 			__FUNCTION__, packet->pid,
@@ -247,9 +248,10 @@ static int __notify_syscall_requester(ihk_os_t os, struct ikc_scd_packet *packet
 	}
 	/* The thread is not spinning any more, make sure it's descheduled */
-	if (!__sync_bool_compare_and_swap(&res->req_thread_status,
+	if (cmpxchg(&res->req_thread_status,
 			IHK_SCD_REQ_THREAD_DESCHEDULED,
-			IHK_SCD_REQ_THREAD_TO_BE_WOKEN)) {
+			IHK_SCD_REQ_THREAD_TO_BE_WOKEN) !=
+			IHK_SCD_REQ_THREAD_DESCHEDULED) {
 		printk("%s: WARNING: inconsistent requester status, "
 			"pid: %d, req status: %lu, syscall nr: %lu\n",
 			__FUNCTION__, packet->pid,
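
Note: __notify_syscall_requester() is the waker side of a small state machine on req_thread_status, and the value-returning cmpxchg() lets both attempts be written as a single comparison: first try to catch the requester while it still spins (SPINNING -> TO_BE_WOKEN, so no IKC message is needed), and only otherwise require that it has really descheduled (DESCHEDULED -> TO_BE_WOKEN) before the wake-up is sent. A compressed sketch of that decision, with hypothetical names and the GCC value CAS standing in for cmpxchg() (the real function also waits and logs when two wake-ups race):

    enum req_state { SPINNING, DESCHEDULED, TO_BE_WOKEN };

    #define cas_val(p, o, n) __sync_val_compare_and_swap((p), (o), (n))

    /* Returns 1 if an explicit wake-up message still has to be sent. */
    static int claim_wakeup(enum req_state *status)
    {
            /* Requester still spinning: flag it, it will notice on its own. */
            if (cas_val(status, SPINNING, TO_BE_WOKEN) == SPINNING)
                    return 0;

            /* Another waker already flagged it; nothing left to do here. */
            if (*status == TO_BE_WOKEN)     /* the patch uses smp_load_acquire() */
                    return 0;

            /* Requester went to sleep: only the caller that flips
             * DESCHEDULED -> TO_BE_WOKEN gets to send the wake-up. */
            return cas_val(status, DESCHEDULED, TO_BE_WOKEN) == DESCHEDULED;
    }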


@@ -587,8 +587,7 @@ static int fileobj_get_page(struct memobj *memobj, off_t off,
 	/* Update the array but see if someone did it already and use
 	 * that if so */
-	if (!__sync_bool_compare_and_swap(&memobj->pages[page_ind],
-				NULL, virt)) {
+	if (cmpxchg(&memobj->pages[page_ind], NULL, virt) != NULL) {
 		ihk_mc_free_pages_user(virt, 1);
 	}
 	else {
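
Note: this is the usual install-once idiom for filling a shared pointer array: every racer allocates a page, exactly one pointer wins the CAS against NULL, and the losers free their private copy and use the published one. With a value-returning CAS the loser test collapses to a single comparison against NULL. A minimal sketch with hypothetical names and plain malloc()/free() in place of the kernel allocator:

    #include <stdlib.h>

    /* Publish 'mine' into *slot if it is still empty and return whichever
     * pointer ends up shared; the losing copy is freed. */
    static void *install_once(void **slot, void *mine)
    {
            void *prev = __sync_val_compare_and_swap(slot, NULL, mine);

            if (prev != NULL) {             /* someone else won the race */
                    free(mine);
                    return prev;
            }
            return mine;
    }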


@@ -159,7 +159,7 @@ static void send_syscall(struct syscall_request *req, int cpu,
 	memcpy(&packet.req, req, sizeof(*req));
 	barrier();
-	packet.req.valid = 1;
+	smp_store_release(&packet.req.valid, 1);
 #ifdef SYSCALL_BY_IKC
 	packet.msg = SCD_MSG_SYSCALL_ONESIDE;
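
Note: barrier(), which the hunk keeps, is a compiler-only fence; it stops the compiler from moving the flag store ahead of the memcpy() but does not constrain the CPU. smp_store_release() adds the hardware ordering needed on weakly ordered architectures, pairing with the smp_load_acquire() of req.valid in the first file. A rough userspace illustration of the difference in C11 terms (an assumed analogy, not the kernel definitions):

    #include <stdatomic.h>

    /* ~ barrier(): orders only compiler-generated code, emits no CPU fence. */
    static inline void compiler_barrier(void)
    {
            atomic_signal_fence(memory_order_seq_cst);
    }

    /* ~ smp_store_release(): compiler *and* CPU ordering; every store
     * before it becomes visible no later than the flag itself. */
    static inline void store_release_int(atomic_int *p, int v)
    {
            atomic_store_explicit(p, v, memory_order_release);
    }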
@@ -234,8 +234,8 @@ long do_syscall(struct syscall_request *req, int cpu)
 #define STATUS_COMPLETED 1
 #define STATUS_PAGE_FAULT 3
 #define STATUS_SYSCALL 4
-	while (res.status != STATUS_COMPLETED) {
-		while (res.status == STATUS_IN_PROGRESS) {
+	while (smp_load_acquire(&res.status) != STATUS_COMPLETED) {
+		while (smp_load_acquire(&res.status) == STATUS_IN_PROGRESS) {
 			struct cpu_local_var *v;
 			int do_schedule = 0;
 			long runq_irqstate;
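
Note: the acquire load in the wait loop does two jobs: it forces a fresh read of res.status on every iteration (a plain read could legally be hoisted out of the loop by the compiler and turn this into an infinite spin), and once STATUS_COMPLETED is observed it orders the later reads of the reply fields after that observation. A userspace sketch of the polling shape, with hypothetical names (the real loop deschedules instead of burning the CPU):

    #include <stdatomic.h>

    #define STATUS_COMPLETED 1

    struct reply {
            atomic_int status;      /* set to STATUS_COMPLETED last, with release */
            long ret;               /* written by the other side before status */
    };

    static long wait_for_reply(struct reply *res)
    {
            /* Re-read on every pass; acquire pairs with the replier's
             * release store of STATUS_COMPLETED. */
            while (atomic_load_explicit(&res->status, memory_order_acquire)
                            != STATUS_COMPLETED)
                    ;

            return res->ret;        /* safe: ordered after the acquire above */
    }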
@@ -273,10 +273,12 @@ long do_syscall(struct syscall_request *req, int cpu)
 		flags = cpu_disable_interrupt_save();
 		/* Try to sleep until notified */
-		if (res.req_thread_status == IHK_SCD_REQ_THREAD_DESCHEDULED ||
-				__sync_bool_compare_and_swap(&res.req_thread_status,
-					IHK_SCD_REQ_THREAD_SPINNING,
-					IHK_SCD_REQ_THREAD_DESCHEDULED)) {
+		if (smp_load_acquire(&res.req_thread_status) ==
+				IHK_SCD_REQ_THREAD_DESCHEDULED ||
+				(cmpxchg(&res.req_thread_status,
+					IHK_SCD_REQ_THREAD_SPINNING,
+					IHK_SCD_REQ_THREAD_DESCHEDULED) ==
+					IHK_SCD_REQ_THREAD_SPINNING)) {
 			dkprintf("%s: tid %d waiting for syscall reply...\n",
 				__FUNCTION__, thread->tid);
 			waitq_init(&thread->scd_wq);
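
Note: this is the requester's half of the protocol handled by __notify_syscall_requester() above. Before blocking, the thread must atomically retire its SPINNING state; if the cmpxchg() does not return SPINNING, a waker has already set (or is about to set) TO_BE_WOKEN, and going to sleep now would risk missing that wake-up. A condensed sketch of the decision, with hypothetical names and the GCC value CAS standing in for cmpxchg():

    enum req_state { SPINNING, DESCHEDULED, TO_BE_WOKEN };

    /* Returns 1 if the caller may block, 0 if a wake-up is already pending. */
    static int may_block(enum req_state *status)
    {
            if (*status == DESCHEDULED)     /* the patch uses smp_load_acquire() */
                    return 1;               /* already marked as sleeping */

            /* Sleep only if we are the ones who retired SPINNING; a failed
             * CAS means a waker raced us on this word. */
            return __sync_val_compare_and_swap(status, SPINNING, DESCHEDULED)
                            == SPINNING;
    }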
@@ -300,7 +302,7 @@ long do_syscall(struct syscall_request *req, int cpu)
 		cpu_restore_interrupt(flags);
 	}
-	if (res.status == STATUS_SYSCALL) {
+	if (smp_load_acquire(&res.status) == STATUS_SYSCALL) {
 		struct syscall_request *requestp;
 		struct syscall_request request;
 		int num;
@@ -1125,8 +1127,8 @@ void terminate_mcexec(int rc, int sig)
 	if ((old_exit_status = proc->group_exit_status) & 0x0000000100000000L)
 		return;
 	exit_status = 0x0000000100000000L | ((rc & 0x00ff) << 8) | (sig & 0xff);
-	if (!__sync_bool_compare_and_swap(&proc->group_exit_status,
-				old_exit_status, exit_status))
+	if (cmpxchg(&proc->group_exit_status,
+				old_exit_status, exit_status) != old_exit_status)
 		return;
 	if (!proc->nohost) {
 		request.number = __NR_exit_group;
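
Note: the exit-status update is a read-then-CAS claim: read the word, bail out if the "already exited" marker bit (0x0000000100000000L above) is set, and install the new status only if the word is still exactly what was read; a failed cmpxchg() means another exit path won the race and this caller simply returns. A sketch of the idiom with hypothetical names:

    #define EXITED_MARKER 0x0000000100000000L   /* marker bit, as in the hunk above */

    /* Returns 1 if this caller installed the exit status, 0 if it lost the
     * race or the process already has one. */
    static int claim_exit_status(long *word, long new_status)
    {
            long old = *word;

            if (old & EXITED_MARKER)
                    return 0;

            /* Install only if nobody changed the word since we read it. */
            return __sync_val_compare_and_swap(word, old, new_status) == old;
    }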
@@ -1979,8 +1981,8 @@ do_mmap(const uintptr_t addr0, const size_t len0, const int prot,
 	populate_len = memobj ? min(len, memobj->size) : len;
 	if (!(flags & MAP_ANONYMOUS)) {
-		if (atomic_cmpxchg4(&memobj->status, MEMOBJ_TO_BE_PREFETCHED,
-				MEMOBJ_READY)) {
+		if (cmpxchg(&memobj->status, MEMOBJ_TO_BE_PREFETCHED,
+				MEMOBJ_READY) == MEMOBJ_TO_BE_PREFETCHED) {
 			populated_mapping = 1;
 		}
@@ -2884,8 +2886,8 @@ unsigned long do_fork(int clone_flags, unsigned long newsp,
 retry_tid:
 	for (i = 0; i < newproc->nr_tids; ++i) {
 		if (!newproc->tids[i].thread) {
-			if (!__sync_bool_compare_and_swap(
-					&newproc->tids[i].thread, NULL, new)) {
+			if (cmpxchg(&newproc->tids[i].thread,
+					NULL, new) != NULL) {
 				goto retry_tid;
 			}
 			new->tid = newproc->tids[i].tid;
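
Note: the TID assignment is a claim-a-free-slot loop: scan for an entry whose thread pointer is still NULL, CAS the new thread into it, and rescan if a concurrent do_fork() grabbed that slot between the check and the CAS. A condensed sketch with hypothetical types:

    #include <stddef.h>

    struct tid_slot {
            int tid;
            void *thread;   /* NULL while the slot is free */
    };

    /* Returns the claimed tid, or -1 if every slot is taken. */
    static int claim_tid(struct tid_slot *slots, int nr, void *new_thread)
    {
    retry:
            for (int i = 0; i < nr; i++) {
                    if (slots[i].thread)
                            continue;
                    /* Lost the race for this slot: rescan, as the
                     * goto retry_tid above does. */
                    if (__sync_val_compare_and_swap(&slots[i].thread, NULL,
                                                    new_thread) != NULL)
                            goto retry;
                    return slots[i].tid;
            }
            return -1;
    }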
@@ -2988,7 +2990,8 @@ retry_tid:
 	new->status = PS_RUNNING;
 	/* Only the first do_fork() call creates a thread on a Linux CPU */
-	if (__sync_bool_compare_and_swap(&old->mod_clone, SPAWN_TO_REMOTE, SPAWN_TO_LOCAL)) {
+	if (cmpxchg(&old->mod_clone, SPAWN_TO_REMOTE, SPAWN_TO_LOCAL) ==
+			SPAWN_TO_REMOTE) {
 		new->mod_clone = SPAWNING_TO_REMOTE;
 		if (old->mod_clone_arg) {
 			new->mod_clone_arg = kmalloc(sizeof(struct uti_attr),
@@ -10273,7 +10276,7 @@ long syscall(int num, ihk_mc_user_context_t *ctx)
 	}
 #endif // PROFILE_ENABLE
-	if (v->flags & CPU_FLAG_NEED_RESCHED) {
+	if (smp_load_acquire(&v->flags) & CPU_FLAG_NEED_RESCHED) {
 		check_need_resched();
 	}