Updated kcalloc/kmalloc calls and enabled sdma_select_user_engine dependencies

Conflicts:
	kernel/include/hfi1/ihk_hfi1_common.h
Author: Aram Santogidis
Date: 2017-08-03 18:23:29 +09:00
Committed by: Balazs Gerofi
Parent: b60a980088
Commit: fe4c461f2f
5 changed files with 33 additions and 16 deletions

View File

@@ -1251,12 +1251,17 @@ struct hfi1_devdata {
};
#endif /* __HFI1_ORIG__ */
//TODO: double check the order
#ifndef __HFI1_ORIG__
struct hfi1_devdata {
struct list_head list;
/* pointers to related structs for this device */
/* pci access data structure */
struct pci_dev *pcidev;
/* lock for sdma_map */
spinlock_t sde_map_lock;
/* array of vl maps */
struct sdma_vl_map __rcu *sdma_map;
dma_addr_t sdma_pad_phys;
/* array of engines sized by num_sdma */
struct sdma_engine *per_sdma;

View File

@@ -7,6 +7,7 @@
#include <lwk/compiler.h>
#include <arch-lock.h>
#include <page.h>
#include <string.h>
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
@@ -65,22 +66,20 @@
#define atomic_read ihk_atomic_read
#define atomic_add ihk_atomic_add
#define atomic_t ihk_atomic_t
typedef ihk_spinlock_t spinlock_t;
/* TODO***********************************/
#define spin_lock_irqsave(lock, flags) do {} while(0)
#define spin_unlock_irqsave(lock, flags) do {} while(0)
#define spin_unlock_irqrestore(lock, flags) do {} while(0)
typedef ihk_spinlock_t spinlock_t;
#define ____cacheline_aligned_in_smp
#define ____cacheline_aligned_in_smp __attribute__((aligned(64)))
#define __iomem
#define spin_lock(...) do {} while(0)
#define spin_unlock(...) do {} while(0)
#define smp_wmb() barrier()
#define smp_rmb() barrier()
/***********************************************/
/* TODO: Figure the corresponding flag for McKernel-kmalloc()*/
#define __GFP_ZERO 0
# define __rcu
#define GFP_KERNEL 0
/* kernel-xppsl_1.5.2/include/linux/seqlock.h */
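The hunk above stubs the Linux locking API out entirely: spin_lock_irqsave() and friends expand to empty statements, since this header is only built for the McKernel side where the ported HFI1 path currently runs with locking compiled out. One side effect of an empty expansion is that the flags argument of spin_lock_irqsave() is never written, so callers that later hand it to spin_unlock_irqrestore() can trigger "may be used uninitialized" or unused-variable warnings. A minimal sketch of warning-free no-op shims (a suggestion, not part of this commit):

/* Sketch only: still no-ops, but each shim evaluates its arguments so the
 * compiler does not flag unused or uninitialized variables. Locking is
 * intentionally elided, exactly as in the stubs above. */
#define spin_lock(lock)                      do { (void)(lock); } while (0)
#define spin_unlock(lock)                    do { (void)(lock); } while (0)
#define spin_lock_irqsave(lock, flags)       do { (void)(lock); (flags) = 0; } while (0)
#define spin_unlock_irqrestore(lock, flags)  do { (void)(lock); (void)(flags); } while (0)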
@@ -113,6 +112,7 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s)
#define MAX_TID_PAIR_ENTRIES 1024 /* max receive expected pairs */
#define PIO_BLOCK_SIZE 64 /* bytes */
/* From: chip.c/h */
#define TXE_NUM_SDMA_ENGINES 16
//num_vls = HFI1_MAX_VLS_SUPPORTED;
//num_vls = dd->chip_sdma_engines;
#define HFI1_MAX_VLS_SUPPORTED 8
@@ -207,12 +207,15 @@ static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
if (size != 0 && n > SIZE_MAX / size)
return NULL;
return __kmalloc(n * size, flags);
return kmalloc(n * size, flags);
}
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
return kmalloc(n * size, flags | __GFP_ZERO);
void *mem = kmalloc(n * size, flags);
if (mem)
memset(mem, 0, n * size);
return mem;
}
#endif
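Note that the rewritten kcalloc() now zeroes the buffer with memset() (which is why the hunk above adds #include <string.h>), but it multiplies n * size before calling kmalloc() directly, so it no longer benefits from the overflow check that kmalloc_array() performs just above. A hedged alternative sketch (a suggestion, not what the commit does) that keeps both the zeroing and the guard:

/* Sketch only: route kcalloc() through kmalloc_array() so the n * size
 * overflow check runs before the allocation is zeroed. */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	void *mem = kmalloc_array(n, size, flags);
	if (mem)
		memset(mem, 0, n * size);
	return mem;
}

With either version, callers such as the per_sdma array in hfi1_devdata can be allocated as kcalloc(num_engines, sizeof(struct sdma_engine), GFP_KERNEL) and arrive zero-initialized (num_engines here is illustrative, not a name from this commit).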

View File

@@ -882,7 +882,7 @@ static inline int sdma_txadd_kvaddr(
return -ENOSPC;
}
#else
//TODO: dma_map_single
addr = virt_to_phys(kvaddr);
#endif /* __HFI1_ORIG__ */
return _sdma_txadd_daddr(
@@ -1038,6 +1038,7 @@ void sdma_engine_interrupt(struct sdma_engine *sde, u64 status);
*
*/
#endif /* __HFI1_ORIG__ */
/**
* struct sdma_map_elem - mapping for a vl
* @mask - selector mask
@@ -1067,12 +1068,17 @@ struct sdma_map_elem {
*/
struct sdma_vl_map {
s8 engine_to_vl[TXE_NUM_SDMA_ENGINES];
#ifdef __HFI1_ORIG__
struct rcu_head list;
#else
//TODO: struct rcu_head list;
#endif /* __HFI1_ORIG__ */
u32 mask;
u8 actual_vls;
u8 vls;
struct sdma_map_elem *map[0];
};
#ifdef __HFI1_ORIG__
int sdma_map_init(
struct hfi1_devdata *dd,

View File

@@ -770,6 +770,7 @@ int sdma_engine_get_vl(struct sdma_engine *sde)
return vl;
}
#endif /* __HFI1_ORIG__ */
/**
* sdma_select_engine_vl() - select sdma engine
* @dd: devdata
@@ -808,25 +809,26 @@ struct sdma_engine *sdma_select_engine_vl(
m = rcu_dereference(dd->sdma_map);
#else
m = (volatile struct sdma_vl_map *)dd->sdma_map;
#endif
#endif /* __HFI1_ORIG__ */
if (unlikely(!m)) {
#ifdef __HFI1_ORIG__
rcu_read_unlock();
#endif
#endif /* __HFI1_ORIG__ */
return &dd->per_sdma[0];
}
e = m->map[vl & m->mask];
rval = e->sde[selector & e->mask];
#ifdef __HFI1_ORIG__
rcu_read_unlock();
#endif
#endif /* __HFI1_ORIG__ */
done:
rval = !rval ? &dd->per_sdma[0] : rval;
trace_hfi1_sdma_engine_select(dd, selector, vl, rval->this_idx);
// trace_hfi1_sdma_engine_select(dd, selector, vl, rval->this_idx);
hfi1_cdbg(AIOWRITE, "-");
return rval;
}
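For reference, the lookup above relies on both map levels being sized to a power of two: m->mask and e->mask are each the entry count minus one, so the AND acts as a cheap modulo on the VL and on the caller-supplied selector. A small self-contained sketch with illustrative sizes (not taken from this commit):

/* Sketch only, illustrative helper: mask is entries - 1 for a power-of-two
 * entry count, so this is equivalent to idx % entries. */
static inline unsigned int sdma_pick_idx(unsigned int idx, unsigned int mask)
{
	return idx & mask;
}

/* Example: with m->mask == 7 and e->mask == 3, vl == 1 and selector == 13
 * resolve to m->map[1] and e->sde[13 & 3] == e->sde[1]; a NULL result still
 * falls back to &dd->per_sdma[0] at the done: label. */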
#ifdef __HFI1_ORIG__
/**
* sdma_select_engine_sc() - select sdma engine
@@ -875,6 +877,7 @@ static struct rhashtable_params sdma_rht_params = {
.mutex_is_held = info_mutex_is_held,
};
#endif /* __HFI1_ORIG__ */
/*
* sdma_select_user_engine() - select sdma engine based on user setup
* @dd: devdata
@@ -918,9 +921,10 @@ struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
return sde;
out:
#endif
#endif /* __HFI1_ORIG__ */
return sdma_select_engine_vl(dd, selector, vl);
}
#ifdef __HFI1_ORIG__
static void sdma_populate_sde_map(struct sdma_rht_map_elem *map)
{

View File

@@ -583,7 +583,7 @@ int hfi1_user_sdma_process_request(void *private_data, struct iovec *iovec,
}
}
dkprintf("%s: hfi1_kregbase: 0x%lx -> 0x%lx:%lu\n",
kprintf("%s: hfi1_kregbase: 0x%lx -> 0x%lx:%lu\n",
__FUNCTION__,
hfi1_kregbase, (phys - TXE_PIO_SEND), TXE_PIO_SEND);
}
@@ -997,11 +997,10 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
set_bit(SDMA_REQ_DONE_ERROR, &req->flags);
return -EFAULT;
}
//TODO: kmem_cache_alloc
#ifdef __HFI1_ORIG__
tx = kmem_cache_alloc(pq->txreq_cache, GFP_KERNEL);
#else
tx = kmalloc(sizeof(struct user_sdma_txreq), GFP_KERNEL | __GFP_ZERO);
tx = kmalloc(sizeof(struct user_sdma_txreq), GFP_KERNEL);
#endif /* __HFI1_ORIG__ */
if (!tx)
return -ENOMEM;