Commit 0013b8e

appneta#136 refactoring

fklassen committed Oct 3, 2014
1 parent d79711d commit 0013b8e

Showing 4 changed files with 34 additions and 34 deletions.
32 changes: 16 additions & 16 deletions kernel/linux/include/linux/quick_tx.h
@@ -231,7 +231,7 @@ extern void quick_tx_worker(struct work_struct *work);
while(0)

#define LOOKUP_TABLE_SIZE (1 << 17) /* 128K */
-#define DMA_BLOCK_TABLE_SIZE (1 << 15) /* 64K */
+#define MEM_BLOCK_TABLE_SIZE (1 << 15) /* 64K */

#define DEV_NAME_PREFIX "quick_tx_"
#define FOLDER_NAME_PREFIX "net/"DEV_NAME_PREFIX
@@ -262,7 +262,7 @@ struct quick_tx_shared_data {
__u32 lookup_consumer_index;
__u32 lookup_producer_index;

-struct quick_tx_mem_block_entry mem_blocks[DMA_BLOCK_TABLE_SIZE];
+struct quick_tx_mem_block_entry mem_blocks[MEM_BLOCK_TABLE_SIZE];
__u32 mem_producer_index;
__u32 mem_producer_offset;
__u32 num_mem_blocks;
@@ -273,13 +273,13 @@ struct quick_tx_shared_data {
__u32 prefix_len;
__u32 postfix_len;

-__u32 mem_block_page_num;
+__u32 num_pages_per_block;

__u32 mbps;

-__u8 user_wait_mem_flag;
-__u8 user_wait_lookup_flag;
-__u8 kernel_wait_lookup_flag;
+__u8 producer_wait_mem_flag;
+__u8 producer_wait_lookup_flag;
+__u8 consumer_wait_lookup_flag;

} __attribute__((aligned(8)));

@@ -313,9 +313,9 @@ struct quick_tx {
* @return boolean whether the block was successfully mapped
*/
static inline bool quick_tx_mmap_mem_block(struct quick_tx* dev) {
-if (dev->data->num_mem_blocks < DMA_BLOCK_TABLE_SIZE) {
+if (dev->data->num_mem_blocks < MEM_BLOCK_TABLE_SIZE) {
unsigned int *map;
-map = mmap(0, dev->data->mem_block_page_num * PAGE_SIZE,
+map = mmap(0, dev->data->num_pages_per_block * PAGE_SIZE,
PROT_READ | PROT_WRITE, MAP_SHARED, dev->fd, 0);

if (map != MAP_FAILED) {
@@ -348,9 +348,9 @@ static inline int quick_tx_alloc_mem_space(struct quick_tx* dev, __s64 bytes) {
if (dev && dev->data) {
int num = 0;
int num_pages = bytes / 256;
-while (num_pages > 0 && dev->data->num_mem_blocks < DMA_BLOCK_TABLE_SIZE) {
+while (num_pages > 0 && dev->data->num_mem_blocks < MEM_BLOCK_TABLE_SIZE) {
if (quick_tx_mmap_mem_block(dev)) {
-num_pages -= dev->data->mem_block_page_num;
+num_pages -= dev->data->num_pages_per_block;
num++;
} else
break;
@@ -370,7 +370,7 @@ static inline int quick_tx_alloc_mem_space(struct quick_tx* dev, __s64 bytes) {
static inline int quick_tx_mmap_all_mem_blocks(struct quick_tx* dev) {
if (dev && dev->data) {
int num = 0;
-while (dev->data->num_mem_blocks < DMA_BLOCK_TABLE_SIZE) {
+while (dev->data->num_mem_blocks < MEM_BLOCK_TABLE_SIZE) {
if (quick_tx_mmap_mem_block(dev))
num++;
else
@@ -443,7 +443,7 @@ static inline bool __get_write_offset_and_inc(struct quick_tx* dev, int length,
__u32 new_mem_producer_index = 0;
/* We will have to use the next available DMA block of memory */
rmb();
-new_mem_producer_index = (data->mem_producer_index + 1) % DMA_BLOCK_TABLE_SIZE;
+new_mem_producer_index = (data->mem_producer_index + 1) % MEM_BLOCK_TABLE_SIZE;
struct quick_tx_mem_block_entry* next_mem_block =
&data->mem_blocks[new_mem_producer_index];

@@ -500,7 +500,7 @@ static inline bool __check_error_flags(struct quick_tx_shared_data* data) {
}

static inline void __wake_up_module(struct quick_tx* dev) {
-dev->data->kernel_wait_lookup_flag = 1;
+dev->data->consumer_wait_lookup_flag = 1;
wmb();
ioctl(dev->fd, START_TX);
}
@@ -521,11 +521,11 @@ static inline void __poll_for(struct quick_tx* dev, short events, __u8 *flag) {
}

static inline void __poll_for_dma(struct quick_tx* dev) {
-__poll_for(dev, POLL_DMA, &dev->data->user_wait_mem_flag);
+__poll_for(dev, POLL_DMA, &dev->data->producer_wait_mem_flag);
}

static inline void __poll_for_lookup(struct quick_tx* dev) {
-__poll_for(dev, POLL_LOOKUP, &dev->data->user_wait_lookup_flag);
+__poll_for(dev, POLL_LOOKUP, &dev->data->producer_wait_lookup_flag);
}

/*
@@ -578,7 +578,7 @@ static inline bool quick_tx_send_packet(struct quick_tx* dev, const void* buffer
wmb();

static int qtx_s = 0;
-if (qtx_s % (DMA_BLOCK_TABLE_SIZE >> 4) == 0) {
+if (qtx_s % (MEM_BLOCK_TABLE_SIZE >> 4) == 0) {
__wake_up_module(dev);
}
qtx_s++;
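The renamed constant is still a power of two, so the producer index in __get_write_offset_and_inc() wraps with a plain modulo, exactly as before. A minimal standalone sketch of that wrap-around (illustrative only, not code from this commit):

    #include <stdio.h>

    #define MEM_BLOCK_TABLE_SIZE (1 << 15) /* 32768 entries, a power of two */

    /* Advance a ring-buffer producer index, wrapping at the table size,
     * mirroring how __get_write_offset_and_inc() picks the next block. */
    static unsigned int next_block_index(unsigned int producer_index)
    {
        return (producer_index + 1) % MEM_BLOCK_TABLE_SIZE;
    }

    int main(void)
    {
        unsigned int idx = MEM_BLOCK_TABLE_SIZE - 1;
        printf("%u -> %u\n", idx, next_block_index(idx)); /* prints 32767 -> 0 */
        return 0;
    }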
4 changes: 2 additions & 2 deletions kernel/linux/quick_tx/quick_tx_main.c
@@ -117,9 +117,9 @@ static unsigned int quick_tx_poll(struct file *file, poll_table *wait)
poll_wait(file, &dev->user_lookup_q, wait);

smp_rmb();
-if (dev->shared_data->user_wait_mem_flag)
+if (dev->shared_data->producer_wait_mem_flag)
mask |= (POLL_DMA);
-if (dev->shared_data->user_wait_lookup_flag)
+if (dev->shared_data->producer_wait_lookup_flag)
mask |= (POLL_LOOKUP);

mutex_unlock(&dev->mtx);
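On the userspace side, the __poll_for() helper in quick_tx.h sets the matching producer flag and then blocks in poll(2) until this handler reports the event. A rough sketch of that pattern, using POLLOUT as a hypothetical stand-in for the module's POLL_DMA/POLL_LOOKUP event values:

    #include <poll.h>

    /* Tell the module we are waiting, then block until it reports `events`
     * on the quick_tx fd -- the shape of __poll_for() in quick_tx.h. */
    static int wait_for_event(int fd, volatile unsigned char *flag, short events)
    {
        struct pollfd pfd = { .fd = fd, .events = events, .revents = 0 };
        *flag = 1; /* e.g. producer_wait_mem_flag or producer_wait_lookup_flag */
        return poll(&pfd, 1, -1); /* -1: wait indefinitely */
    }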
18 changes: 9 additions & 9 deletions kernel/linux/quick_tx/quick_tx_mmap.c
@@ -91,7 +91,7 @@ void quick_tx_vm_mem_close(struct vm_area_struct *vma)
if (dev->shared_data->num_mem_blocks > 0) {
#if DMA_COHERENT
if (dev->using_mem_coherent)
-dma_free_coherent(&dev->netdev->dev, dev->shared_data->mem_block_page_num * PAGE_SIZE,
+dma_free_coherent(&dev->netdev->dev, dev->shared_data->num_pages_per_block * PAGE_SIZE,
dev->shared_data->mem_blocks[dev->shared_data->num_mem_blocks - 1].kernel_addr,
(dma_addr_t)dev->shared_data->mem_blocks[dev->shared_data->num_mem_blocks - 1].mem_handle);
else
@@ -162,11 +162,11 @@ int quick_tx_mmap_master(struct file * file, struct vm_area_struct * vma) {
dev->shared_data->prefix_len = NET_SKB_PAD;
dev->shared_data->postfix_len = sizeof(struct skb_shared_info);

-dev->shared_data->mem_block_page_num = 2 * (PAGE_ALIGN(dev->netdev->mtu) >> PAGE_SHIFT);
+dev->shared_data->num_pages_per_block = 2 * (PAGE_ALIGN(dev->netdev->mtu) >> PAGE_SHIFT);
dev->quit_work = false;
smp_wmb();

qtx_error("pages per DMA block set to %d", dev->shared_data->mem_block_page_num);
qtx_error("pages per DMA block set to %d", dev->shared_data->num_pages_per_block);

INIT_WORK(&dev->tx_work, quick_tx_worker);
dev->tx_workqueue = alloc_workqueue(QUICK_TX_WORKQUEUE, WQ_UNBOUND | WQ_CPU_INTENSIVE | WQ_HIGHPRI, 1);
@@ -195,7 +195,7 @@ int quick_tx_mmap_mem_block(struct file * file, struct vm_area_struct * vma)

mutex_lock(&dev->mtx);

-if (dev->shared_data && dev->shared_data->num_mem_blocks >= DMA_BLOCK_TABLE_SIZE) {
+if (dev->shared_data && dev->shared_data->num_mem_blocks >= MEM_BLOCK_TABLE_SIZE) {
qtx_error("This device already has the maximum number of DMA blocks mapped to it");
return -ENOMEM;
}
@@ -207,11 +207,11 @@ int quick_tx_mmap_mem_block(struct file * file, struct vm_area_struct * vma)

#if DMA_COHERENT
if (dev->using_mem_coherent)
-mem_block_p = dma_alloc_coherent(dev->netdev->dev.parent, dev->shared_data->mem_block_page_num * PAGE_SIZE,
+mem_block_p = dma_alloc_coherent(dev->netdev->dev.parent, dev->shared_data->num_pages_per_block * PAGE_SIZE,
(dma_addr_t*)&entry->mem_handle, GFP_KERNEL);
else
#endif
-mem_block_p = kmalloc(dev->shared_data->mem_block_page_num * PAGE_SIZE, GFP_KERNEL);
+mem_block_p = kmalloc(dev->shared_data->num_pages_per_block * PAGE_SIZE, GFP_KERNEL);

if (!mem_block_p)
{
@@ -235,7 +235,7 @@ int quick_tx_mmap_mem_block(struct file * file, struct vm_area_struct * vma)
vma->vm_ops = &quick_tx_vma_ops_dma;

entry->kernel_addr = mem_block_p;
-entry->length = dev->shared_data->mem_block_page_num * PAGE_SIZE;
+entry->length = dev->shared_data->num_pages_per_block * PAGE_SIZE;

dev->shared_data->num_mem_blocks++;
wmb();
@@ -247,7 +247,7 @@ int quick_tx_mmap_mem_block(struct file * file, struct vm_area_struct * vma)
error_map:
#if DMA_COHERENT
if (dev->using_mem_coherent)
-dma_free_coherent(&dev->netdev->dev, dev->shared_data->mem_block_page_num * PAGE_SIZE,
+dma_free_coherent(&dev->netdev->dev, dev->shared_data->num_pages_per_block * PAGE_SIZE,
mem_block_p, (dma_addr_t)entry->mem_handle);
else
#endif
Expand All @@ -267,7 +267,7 @@ int quick_tx_mmap(struct file * file, struct vm_area_struct * vma)
int num_pages = PAGE_ALIGN(vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
if (num_pages == QTX_MASTER_PAGE_NUM) {
return quick_tx_mmap_master(file, vma);
-} else if ((dev->shared_data) && num_pages == dev->shared_data->mem_block_page_num) {
+} else if ((dev->shared_data) && num_pages == dev->shared_data->num_pages_per_block) {
return quick_tx_mmap_mem_block(file, vma);
} else {
qtx_error("Invalid map size!");
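The size dispatch at the end of quick_tx_mmap() means userspace selects what it maps purely by length: QTX_MASTER_PAGE_NUM pages selects the master mapping, num_pages_per_block pages selects a memory block. A condensed sketch of the block-mapping call from the header (error handling elided; PAGE_SIZE assumed to be 4 KiB):

    #include <stddef.h>
    #include <sys/mman.h>

    #ifndef PAGE_SIZE
    #define PAGE_SIZE 4096 /* assumption: 4 KiB pages */
    #endif

    /* Map one memory block; the kernel routes this to quick_tx_mmap_mem_block()
     * because the requested length matches num_pages_per_block pages. */
    static void *map_one_block(int fd, unsigned int num_pages_per_block)
    {
        return mmap(0, (size_t)num_pages_per_block * PAGE_SIZE,
                    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    }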
14 changes: 7 additions & 7 deletions kernel/linux/quick_tx/quick_tx_worker.c
@@ -26,15 +26,15 @@ static inline void quick_tx_set_flag_wake_up_queue(wait_queue_head_t *q, __u8 *f
}

inline void quick_tx_wake_up_user_dma(struct quick_tx_dev *dev) {
-quick_tx_set_flag_wake_up_queue(&dev->user_mem_q, &dev->shared_data->user_wait_mem_flag);
+quick_tx_set_flag_wake_up_queue(&dev->user_mem_q, &dev->shared_data->producer_wait_mem_flag);
}

inline void quick_tx_wake_up_user_lookup(struct quick_tx_dev *dev) {
-quick_tx_set_flag_wake_up_queue(&dev->user_lookup_q, &dev->shared_data->user_wait_lookup_flag);
+quick_tx_set_flag_wake_up_queue(&dev->user_lookup_q, &dev->shared_data->producer_wait_lookup_flag);
}

inline void quick_tx_wake_up_kernel_lookup(struct quick_tx_dev *dev) {
-quick_tx_set_flag_wake_up_queue(&dev->kernel_lookup_q, &dev->shared_data->kernel_wait_lookup_flag);
+quick_tx_set_flag_wake_up_queue(&dev->kernel_lookup_q, &dev->shared_data->consumer_wait_lookup_flag);
}

static inline int quick_tx_clear_skb_list(struct quick_tx_skb *list) {
@@ -298,8 +298,8 @@ void quick_tx_worker(struct work_struct *work)

txq = netdev_get_tx_queue(dev->netdev, 0);

-dev->shared_data->kernel_wait_lookup_flag = 0;
-wait_event(dev->kernel_lookup_q, dev->shared_data->kernel_wait_lookup_flag);
+dev->shared_data->consumer_wait_lookup_flag = 0;
+wait_event(dev->kernel_lookup_q, dev->shared_data->consumer_wait_lookup_flag);
dev->time_start_tx = ktime_get_real();

while (true) {
@@ -361,15 +361,15 @@ void quick_tx_worker(struct work_struct *work)
#endif

dev->numsleeps++;
-dev->shared_data->kernel_wait_lookup_flag = 0;
+dev->shared_data->consumer_wait_lookup_flag = 0;
smp_wmb();

/* Free some DMA blocks before going to sleep */
if(!list_empty(&dev->skb_queued_list.list))
quick_tx_do_xmit(NULL, txq, dev, 1, false);
quick_tx_free_skb(dev, false);

-wait_event(dev->kernel_lookup_q, dev->shared_data->kernel_wait_lookup_flag);
+wait_event(dev->kernel_lookup_q, dev->shared_data->consumer_wait_lookup_flag);
}
}

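With the rename, the worker reads as a textbook consumer: it clears consumer_wait_lookup_flag before sleeping, and the producer side sets the flag and wakes the queue. A minimal kernel-style sketch of the set-and-wake half; the body of quick_tx_set_flag_wake_up_queue() is inferred from its callers here, so treat the details as an assumption:

    #include <linux/types.h>
    #include <linux/wait.h>

    /* Set the wait flag, publish it with a write barrier, then wake any
     * sleeper -- the counterpart of the wait_event() calls above. */
    static inline void set_flag_and_wake(wait_queue_head_t *q, u8 *flag)
    {
        *flag = 1;
        smp_wmb(); /* flag must be visible before the wakeup */
        wake_up(q);
    }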
