Fix other warnings

Most of them were harmless, but the change from an open-coded volatile
cast to ACCESS_ONCE is probably useful.
Expanding the macro, we basically went from:
    m = (volatile struct sdma_vl_map *)dd->sdma_map;
to
    m = *(volatile struct sdma_vl_map **)&(dd->sdma_map);
i.e. the volatile access is now at a different level of indirection: it is
the load of dd->sdma_map itself that is volatile, not just the pointer value
produced by the cast.
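
For reference, a minimal standalone sketch of that difference (the struct and
field names are simplified stand-ins for the hfi1 ones, and ACCESS_ONCE is
spelled out as its classic one-line definition):

    #include <stdio.h>

    /* Classic one-line ACCESS_ONCE; the kernel macro expands the same way. */
    #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

    /* Simplified stand-ins for the hfi1 types, for illustration only. */
    struct sdma_vl_map { int nr_vls; };
    struct hfi1_devdata { struct sdma_vl_map *sdma_map; };

    static struct sdma_vl_map *read_map(struct hfi1_devdata *dd)
    {
        /* Old form: the volatile qualifier ends up on the casted value, so
         * the load of dd->sdma_map is still an ordinary, optimizable read. */
        volatile struct sdma_vl_map *old_style =
            (volatile struct sdma_vl_map *)dd->sdma_map;
        (void)old_style;

        /* New form: the volatile qualifier is on the pointer being read, so
         * the compiler must perform one real load of the field here. */
        return ACCESS_ONCE(dd->sdma_map);
    }

    int main(void)
    {
        struct sdma_vl_map map = { .nr_vls = 8 };
        struct hfi1_devdata dd = { .sdma_map = &map };

        printf("nr_vls = %d\n", read_map(&dd)->nr_vls);
        return 0;
    }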
Dominique Martinet
2017-09-15 11:16:36 +09:00
committed by Balazs Gerofi
parent 2dc85ee417
commit 7366da4390
5 changed files with 18 additions and 19 deletions

View File

@@ -133,7 +133,7 @@ static inline void ihk_atomic64_inc(ihk_atomic64_t *v)
* Note 2: xchg has side effect, so that attribute volatile is necessary,
* but generally the primitive is invalid, *ptr is output argument. --ANK
*/
-#define __xg(x) ((volatile long *)(x))
+#define __xg(x) ((volatile typeof(x))(x))
#define xchg4(ptr, x) \
({ \
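
A quick way to see what the typeof version of __xg changes: the old cast
reinterprets every pointer as volatile long *, so a dereference through it has
long's width no matter what the argument really points to, while typeof keeps
the pointer's own type. A standalone sketch (the two __xg variants are copied
from the hunk above, renamed so both fit in one file; the rest is
illustrative):

    #include <stdio.h>

    #define __xg_old(x) ((volatile long *)(x))
    #define __xg_new(x) ((volatile typeof(x))(x))

    int main(void)
    {
        unsigned int v = 0x11223344;

        /* Old: the dereferenced object is a long, i.e. 8 bytes on LP64,
         * regardless of v being a 4-byte unsigned int. */
        printf("old deref width: %zu\n", sizeof(*__xg_old(&v)));

        /* New: typeof(&v) is unsigned int *, so the access keeps the
         * original width and type. */
        printf("new deref width: %zu\n", sizeof(*__xg_new(&v)));
        return 0;
    }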

View File

@@ -421,13 +421,10 @@ long hfi1_file_ioctl(void *private_data, unsigned int cmd,
{
struct hfi1_filedata *fd = private_data;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_user_info uinfo;
#if 0
struct hfi1_tid_info tinfo;
#endif
int ret = 0;
unsigned long addr;
int uval = 0;
unsigned long ul_uval = 0;
u16 uval16 = 0;
hfi1_cdbg(IOCTL, "IOCTL recv: 0x%x", cmd);
if (cmd != HFI1_IOCTL_ASSIGN_CTXT &&
@@ -435,9 +432,6 @@ long hfi1_file_ioctl(void *private_data, unsigned int cmd,
!uctxt)
return -EINVAL;
-if (rdtsc() - t_s < 400000000)
-return;
switch (cmd) {
case HFI1_IOCTL_ASSIGN_CTXT:
#if 0

View File

@@ -764,8 +764,8 @@ static inline int sdma_txadd_page(
u16 len)
{
dma_addr_t addr;
-int rval;
#ifdef __HFI1_ORIG__
+int rval;
/* TODO: check this coealesce thing */
hfi1_cdbg(AIOWRITE, "+");
if ((unlikely(tx->num_desc == tx->desc_limit))) {
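
The int rval move above (and the similar u16 i and size_t pgsize changes
further down) is the same kind of fix: a variable referenced only inside an
#ifdef block has to be declared under that guard, otherwise the other
configuration warns about it being unused. A hypothetical minimal example
(not hfi1 code, names made up):

    #ifdef __HFI1_ORIG__
    extern int do_orig_path(void);     /* made-up helper */
    #endif

    int frob(void)
    {
    #ifdef __HFI1_ORIG__
        int rval;                      /* declared only where it is used */

        rval = do_orig_path();
        return rval;
    #else
        /* Had rval been declared above the #ifdef, this configuration
         * would emit -Wunused-variable for it. */
        return 0;
    #endif
    }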

View File

@@ -808,7 +808,7 @@ struct sdma_engine *sdma_select_engine_vl(
rcu_read_lock();
m = rcu_dereference(dd->sdma_map);
#else
-m = (volatile struct sdma_vl_map *)dd->sdma_map;
+m = ACCESS_ONCE(dd->sdma_map);
#endif /* __HFI1_ORIG__ */
if (unlikely(!m)) {
#ifdef __HFI1_ORIG__
@@ -1730,20 +1730,20 @@ void __sdma_txclean(
struct hfi1_devdata *dd,
struct sdma_txreq *tx)
{
-u16 i;
if (tx->num_desc) {
/* TODO: enable sdma_unmap_desc */
#if 0
+u16 i;
u8 skip = 0, mode = ahg_mode(tx);
/* TODO: enable sdma_unmap_desc */
/* unmap first */
//sdma_unmap_desc(dd, &tx->descp[0]);
/* determine number of AHG descriptors to skip */
if (mode > SDMA_AHG_APPLY_UPDATE1)
skip = mode >> 1;
/* TODO: enable sdma_unmap_desc */
// for (i = 1 + skip; i < tx->num_desc; i++)
// sdma_unmap_desc(dd, &tx->descp[i]);
#endif
tx->num_desc = 0;
}
kfree(tx->coalesce_buf);

View File

@@ -559,7 +559,9 @@ int hfi1_map_device_addresses(struct hfi1_filedata *fd)
pte_t *ptep;
enum ihk_mc_pt_attribute attr;
void *virt;
+#ifdef __HFI1_ORIG__
size_t pgsize;
+#endif
unsigned long phys;
unsigned long len;
@@ -1134,12 +1136,14 @@ int hfi1_user_sdma_process_request(void *private_data, struct iovec *iovec,
hfi1_cdbg(AIOWRITE, "-wait_event_interruptible_timeout");
#else
TP("+ polling while(pq->state != SDMA_PKT_Q_ACTIVE)");
+#ifdef VERBOSE_DEBUG
{
unsigned long ts = rdtsc();
while (pq->state != SDMA_PKT_Q_ACTIVE) cpu_pause();
SDMA_DBG("%s: waited %lu cycles for SDMA_PKT_Q_ACTIVE\n",
__FUNCTION__, rdtsc() - ts);
}
+#endif /* VERBOSE_DEBUG */
TP("- polling while(pq->state != SDMA_PKT_Q_ACTIVE)");
#endif /* __HFI1_ORIG__ */
}
@@ -1233,7 +1237,8 @@ static int user_sdma_send_pkts(struct user_sdma_request *req,
unsigned maxpkts,
struct kmalloc_cache_header *txreq_cache)
{
-int ret = 0, count;
+int ret = 0;
+u32 count;
unsigned npkts = 0;
struct user_sdma_txreq *tx = NULL;
struct hfi1_user_sdma_pkt_q *pq = NULL;
@@ -1437,11 +1442,11 @@ static int user_sdma_send_pkts(struct user_sdma_request *req,
unsigned pageidx;
#endif
unsigned len;
-unsigned long base, offset;
+uintptr_t base;
void *virt;
-base = (unsigned long)iovec->iov.iov_base;
-virt = base + iovec->offset + iov_offset;
+base = (uintptr_t)iovec->iov.iov_base;
+virt = (void*)(base + iovec->offset + iov_offset);
/*
* Resolve iovec->base_phys if virt is out of last page.