Merge branch 'ioat-md-accel-for-linus' of git://lost.foo-projects.org/~dwillia2/git/iop
* 'ioat-md-accel-for-linus' of git://lost.foo-projects.org/~dwillia2/git/iop: (28 commits)
  ioatdma: add the unisys "i/oat" pci vendor/device id
  ARM: Add drivers/dma to arch/arm/Kconfig
  iop3xx: surface the iop3xx DMA and AAU units to the iop-adma driver
  iop13xx: surface the iop13xx adma units to the iop-adma driver
  dmaengine: driver for the iop32x, iop33x, and iop13xx raid engines
  md: remove raid5 compute_block and compute_parity5
  md: handle_stripe5 - request io processing in raid5_run_ops
  md: handle_stripe5 - add request/completion logic for async expand ops
  md: handle_stripe5 - add request/completion logic for async read ops
  md: handle_stripe5 - add request/completion logic for async check ops
  md: handle_stripe5 - add request/completion logic for async compute ops
  md: handle_stripe5 - add request/completion logic for async write ops
  md: common infrastructure for running operations with raid5_run_ops
  md: raid5_run_ops - run stripe operations outside sh->lock
  raid5: replace custom debug PRINTKs with standard pr_debug
  raid5: refactor handle_stripe5 and handle_stripe6 (v3)
  async_tx: add the async_tx api
  xor: make 'xor_blocks' a library routine for use with async_tx
  dmaengine: make clients responsible for managing channels
  dmaengine: refactor dmaengine around dma_async_tx_descriptor
  ...
 net/core/dev.c | 110
@@ -151,9 +151,22 @@ static struct list_head ptype_base[16] __read_mostly;	/* 16 way hashed list */
 static struct list_head ptype_all __read_mostly;	/* Taps */
 
 #ifdef CONFIG_NET_DMA
-static struct dma_client *net_dma_client;
-static unsigned int net_dma_count;
-static spinlock_t net_dma_event_lock;
+struct net_dma {
+	struct dma_client client;
+	spinlock_t lock;
+	cpumask_t channel_mask;
+	struct dma_chan *channels[NR_CPUS];
+};
+
+static enum dma_state_client
+netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
+	enum dma_state state);
+
+static struct net_dma net_dma = {
+	.client = {
+		.event_callback = netdev_dma_event,
+	},
+};
 #endif
 
 /*
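This hunk replaces the file-scope globals with a struct net_dma that embeds its struct dma_client, so the event callback can recover its enclosing state with container_of(). A minimal, self-contained sketch of that embedding pattern (my_client/my_ctx are illustrative names, not from this diff):

	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct my_client { int id; };		/* stand-in for struct dma_client */

	struct my_ctx {
		struct my_client client;	/* embedded, like net_dma.client */
		int private_state;
	};

	/* the framework hands back only a pointer to the embedded member... */
	static void my_event(struct my_client *client)
	{
		/* ...and container_of() recovers the enclosing object,
		 * exactly as netdev_dma_event() does below */
		struct my_ctx *ctx = container_of(client, struct my_ctx, client);
		ctx->private_state++;
	}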
@@ -2022,12 +2035,13 @@ out:
 	 * There may not be any more sk_buffs coming right now, so push
 	 * any pending DMA copies to hardware
 	 */
-	if (net_dma_client) {
-		struct dma_chan *chan;
-		rcu_read_lock();
-		list_for_each_entry_rcu(chan, &net_dma_client->channels, client_node)
-			dma_async_memcpy_issue_pending(chan);
-		rcu_read_unlock();
+	if (!cpus_empty(net_dma.channel_mask)) {
+		int chan_idx;
+		for_each_cpu_mask(chan_idx, net_dma.channel_mask) {
+			struct dma_chan *chan = net_dma.channels[chan_idx];
+			if (chan)
+				dma_async_memcpy_issue_pending(chan);
+		}
 	}
 #endif
 	return;
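The issue_pending call implements a submit-many, kick-once model: copies queued earlier in softirq context are only started when the tail of the softirq flushes each channel. A hedged sketch of that usage against the dmaengine memcpy API of this kernel generation (chan, nr_bufs and the dst/src/len arrays are illustrative):

	dma_cookie_t cookie = 0;
	int i;

	/* queue several copies on one channel without touching hardware */
	for (i = 0; i < nr_bufs; i++)
		cookie = dma_async_memcpy_buf_to_buf(chan, dst[i], src[i], len[i]);

	/* one doorbell starts everything queued above */
	dma_async_memcpy_issue_pending(chan);

	/* poll for the last cookie to retire */
	while (dma_async_memcpy_complete(chan, cookie, NULL, NULL) == DMA_IN_PROGRESS)
		cpu_relax();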
@@ -3775,12 +3789,13 @@ static int dev_cpu_callback(struct notifier_block *nfb,
  * This is called when the number of channels allocated to the net_dma_client
  * changes. The net_dma_client tries to have one DMA channel per CPU.
  */
-static void net_dma_rebalance(void)
+static void net_dma_rebalance(struct net_dma *net_dma)
 {
-	unsigned int cpu, i, n;
+	unsigned int cpu, i, n, chan_idx;
 	struct dma_chan *chan;
 
-	if (net_dma_count == 0) {
+	if (cpus_empty(net_dma->channel_mask)) {
 		for_each_online_cpu(cpu)
 			rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
 		return;
@@ -3789,10 +3804,12 @@ static void net_dma_rebalance(void)
 	i = 0;
 	cpu = first_cpu(cpu_online_map);
 
-	rcu_read_lock();
-	list_for_each_entry(chan, &net_dma_client->channels, client_node) {
-		n = ((num_online_cpus() / net_dma_count)
-			+ (i < (num_online_cpus() % net_dma_count) ? 1 : 0));
+	for_each_cpu_mask(chan_idx, net_dma->channel_mask) {
+		chan = net_dma->channels[chan_idx];
+
+		n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
+		   + (i < (num_online_cpus() %
+			cpus_weight(net_dma->channel_mask)) ? 1 : 0));
 
 		while(n) {
 			per_cpu(softnet_data, cpu).net_dma = chan;
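The n expression spreads CPUs over channels as evenly as possible: every channel gets the quotient, and the first remainder channels take one extra CPU. A worked example with the same arithmetic, assuming 8 online CPUs and 3 channels:

	unsigned int cpus = 8, chans = 3, i;

	for (i = 0; i < chans; i++) {
		unsigned int n = cpus / chans + (i < cpus % chans ? 1 : 0);
		/* i == 0 -> n = 3, i == 1 -> n = 3, i == 2 -> n = 2 */
	}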
@@ -3801,7 +3818,6 @@ static void net_dma_rebalance(void)
 		}
 		i++;
 	}
-	rcu_read_unlock();
 }
 
 /**
@@ -3810,23 +3826,53 @@ static void net_dma_rebalance(void)
  * @chan: DMA channel for the event
  * @event: event type
  */
-static void netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
-	enum dma_event event)
+static enum dma_state_client
+netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
+	enum dma_state state)
 {
-	spin_lock(&net_dma_event_lock);
-	switch (event) {
-	case DMA_RESOURCE_ADDED:
-		net_dma_count++;
-		net_dma_rebalance();
+	int i, found = 0, pos = -1;
+	struct net_dma *net_dma =
+		container_of(client, struct net_dma, client);
+	enum dma_state_client ack = DMA_DUP; /* default: take no action */
+
+	spin_lock(&net_dma->lock);
+	switch (state) {
+	case DMA_RESOURCE_AVAILABLE:
+		for (i = 0; i < NR_CPUS; i++)
+			if (net_dma->channels[i] == chan) {
+				found = 1;
+				break;
+			} else if (net_dma->channels[i] == NULL && pos < 0)
+				pos = i;
+
+		if (!found && pos >= 0) {
+			ack = DMA_ACK;
+			net_dma->channels[pos] = chan;
+			cpu_set(pos, net_dma->channel_mask);
+			net_dma_rebalance(net_dma);
+		}
 		break;
 	case DMA_RESOURCE_REMOVED:
-		net_dma_count--;
-		net_dma_rebalance();
+		for (i = 0; i < NR_CPUS; i++)
+			if (net_dma->channels[i] == chan) {
+				found = 1;
+				pos = i;
+				break;
+			}
+
+		if (found) {
+			ack = DMA_ACK;
+			cpu_clear(pos, net_dma->channel_mask);
+			net_dma->channels[i] = NULL;
+			net_dma_rebalance(net_dma);
+		}
 		break;
 	default:
 		break;
 	}
-	spin_unlock(&net_dma_event_lock);
+	spin_unlock(&net_dma->lock);
+
+	return ack;
 }
 
 /**
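The callback's return value is the heart of the "clients manage channels" contract introduced by this merge: the dmaengine core offers each channel to every registered client, DMA_ACK claims it (or, on removal, acknowledges releasing it), and DMA_DUP declines without vetoing it for other clients. A hedged skeleton of that protocol (i_want_this_channel() is an illustrative predicate, not a real helper):

	static enum dma_state_client
	my_dma_event(struct dma_client *client, struct dma_chan *chan,
		enum dma_state state)
	{
		switch (state) {
		case DMA_RESOURCE_AVAILABLE:
			if (i_want_this_channel(chan))
				return DMA_ACK;	/* take a reference on chan */
			return DMA_DUP;		/* leave it for other clients */
		case DMA_RESOURCE_REMOVED:
			return DMA_ACK;		/* drop our reference */
		default:
			return DMA_DUP;
		}
	}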
@@ -3834,12 +3880,10 @@ static void netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
  */
 static int __init netdev_dma_register(void)
 {
-	spin_lock_init(&net_dma_event_lock);
-	net_dma_client = dma_async_client_register(netdev_dma_event);
-	if (net_dma_client == NULL)
-		return -ENOMEM;
-
-	dma_async_client_chan_request(net_dma_client, num_online_cpus());
+	spin_lock_init(&net_dma.lock);
+	dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
+	dma_async_client_register(&net_dma.client);
+	dma_async_client_chan_request(&net_dma.client);
 	return 0;
 }
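After the refactor, registration can no longer fail with -ENOMEM: the client supplies its own storage, declares the operations it needs via dma_cap_set(), and asks for channels with dma_async_client_chan_request(). net/core/dev.c never tears this down, but a symmetric client would; a minimal lifecycle sketch under that assumption (the __exit path is illustrative, not part of this commit):

	static int __init my_client_init(void)
	{
		spin_lock_init(&net_dma.lock);
		/* only offer us memcpy-capable channels */
		dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
		dma_async_client_register(&net_dma.client);
		dma_async_client_chan_request(&net_dma.client);
		return 0;
	}

	static void __exit my_client_exit(void)
	{
		/* hands every DMA_ACKed channel back to the dmaengine core */
		dma_async_client_unregister(&net_dma.client);
	}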