Merge branch 'linus' into core/urgent
@@ -538,9 +538,10 @@ static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
struct acpi_madt_local_apic *lapic;
cpumask_t tmp_map, new_map;
cpumask_var_t tmp_map, new_map;
u8 physid;
int cpu;
int retval = -ENOMEM;

if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
return -EINVAL;
@@ -569,23 +570,37 @@ static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
buffer.length = ACPI_ALLOCATE_BUFFER;
buffer.pointer = NULL;

tmp_map = cpu_present_map;
if (!alloc_cpumask_var(&tmp_map, GFP_KERNEL))
goto out;

if (!alloc_cpumask_var(&new_map, GFP_KERNEL))
goto free_tmp_map;

cpumask_copy(tmp_map, cpu_present_mask);
acpi_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED);

/*
* If mp_register_lapic successfully generates a new logical cpu
* number, then the following will get us exactly what was mapped
*/
cpus_andnot(new_map, cpu_present_map, tmp_map);
if (cpus_empty(new_map)) {
cpumask_andnot(new_map, cpu_present_mask, tmp_map);
if (cpumask_empty(new_map)) {
printk ("Unable to map lapic to logical cpu number\n");
return -EINVAL;
retval = -EINVAL;
goto free_new_map;
}

cpu = first_cpu(new_map);
cpu = cpumask_first(new_map);

*pcpu = cpu;
return 0;
retval = 0;

free_new_map:
free_cpumask_var(new_map);
free_tmp_map:
free_cpumask_var(tmp_map);
out:
return retval;
}

/* wrapper to silence section mismatch warning */
@@ -598,7 +613,7 @@ EXPORT_SYMBOL(acpi_map_lsapic);
int acpi_unmap_lsapic(int cpu)
{
per_cpu(x86_cpu_to_apicid, cpu) = -1;
cpu_clear(cpu, cpu_present_map);
set_cpu_present(cpu, false);
num_processors--;

return (0);
@@ -20,8 +20,12 @@
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#ifdef CONFIG_IOMMU_API
#include <linux/iommu.h>
#endif
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
@@ -38,6 +42,10 @@ static DEFINE_RWLOCK(amd_iommu_devtable_lock);
static LIST_HEAD(iommu_pd_list);
static DEFINE_SPINLOCK(iommu_pd_list_lock);

#ifdef CONFIG_IOMMU_API
static struct iommu_ops amd_iommu_ops;
#endif

/*
* general struct to manage commands send to an IOMMU
*/
@@ -47,6 +55,68 @@ struct iommu_cmd {

static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
struct unity_map_entry *e);
static struct dma_ops_domain *find_protection_domain(u16 devid);


#ifdef CONFIG_AMD_IOMMU_STATS

/*
* Initialization code for statistics collection
*/

DECLARE_STATS_COUNTER(compl_wait);
DECLARE_STATS_COUNTER(cnt_map_single);
DECLARE_STATS_COUNTER(cnt_unmap_single);
DECLARE_STATS_COUNTER(cnt_map_sg);
DECLARE_STATS_COUNTER(cnt_unmap_sg);
DECLARE_STATS_COUNTER(cnt_alloc_coherent);
DECLARE_STATS_COUNTER(cnt_free_coherent);
DECLARE_STATS_COUNTER(cross_page);
DECLARE_STATS_COUNTER(domain_flush_single);
DECLARE_STATS_COUNTER(domain_flush_all);
DECLARE_STATS_COUNTER(alloced_io_mem);
DECLARE_STATS_COUNTER(total_map_requests);

static struct dentry *stats_dir;
static struct dentry *de_isolate;
static struct dentry *de_fflush;

static void amd_iommu_stats_add(struct __iommu_counter *cnt)
{
if (stats_dir == NULL)
return;

cnt->dent = debugfs_create_u64(cnt->name, 0444, stats_dir,
&cnt->value);
}

static void amd_iommu_stats_init(void)
{
stats_dir = debugfs_create_dir("amd-iommu", NULL);
if (stats_dir == NULL)
return;

de_isolate = debugfs_create_bool("isolation", 0444, stats_dir,
(u32 *)&amd_iommu_isolate);

de_fflush = debugfs_create_bool("fullflush", 0444, stats_dir,
(u32 *)&amd_iommu_unmap_flush);

amd_iommu_stats_add(&compl_wait);
amd_iommu_stats_add(&cnt_map_single);
amd_iommu_stats_add(&cnt_unmap_single);
amd_iommu_stats_add(&cnt_map_sg);
amd_iommu_stats_add(&cnt_unmap_sg);
amd_iommu_stats_add(&cnt_alloc_coherent);
amd_iommu_stats_add(&cnt_free_coherent);
amd_iommu_stats_add(&cross_page);
amd_iommu_stats_add(&domain_flush_single);
amd_iommu_stats_add(&domain_flush_all);
amd_iommu_stats_add(&alloced_io_mem);
amd_iommu_stats_add(&total_map_requests);
}

#endif

/* returns !0 if the IOMMU is caching non-present entries in its TLB */
static int iommu_has_npcache(struct amd_iommu *iommu)
@@ -189,41 +259,23 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
spin_lock_irqsave(&iommu->lock, flags);
ret = __iommu_queue_command(iommu, cmd);
if (!ret)
iommu->need_sync = 1;
iommu->need_sync = true;
spin_unlock_irqrestore(&iommu->lock, flags);

return ret;
}

/*
* This function is called whenever we need to ensure that the IOMMU has
* completed execution of all commands we sent. It sends a
* COMPLETION_WAIT command and waits for it to finish. The IOMMU informs
* us about that by writing a value to a physical address we pass with
* the command.
* This function waits until an IOMMU has completed a completion
* wait command
*/
static int iommu_completion_wait(struct amd_iommu *iommu)
static void __iommu_wait_for_completion(struct amd_iommu *iommu)
{
int ret = 0, ready = 0;
int ready = 0;
unsigned status = 0;
struct iommu_cmd cmd;
unsigned long flags, i = 0;
unsigned long i = 0;

memset(&cmd, 0, sizeof(cmd));
cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);

spin_lock_irqsave(&iommu->lock, flags);

if (!iommu->need_sync)
goto out;

iommu->need_sync = 0;

ret = __iommu_queue_command(iommu, &cmd);

if (ret)
goto out;
INC_STATS_COUNTER(compl_wait);

while (!ready && (i < EXIT_LOOP_COUNT)) {
++i;
@@ -238,6 +290,48 @@ static int iommu_completion_wait(struct amd_iommu *iommu)

if (unlikely(i == EXIT_LOOP_COUNT))
panic("AMD IOMMU: Completion wait loop failed\n");
}

/*
* This function queues a completion wait command into the command
* buffer of an IOMMU
*/
static int __iommu_completion_wait(struct amd_iommu *iommu)
{
struct iommu_cmd cmd;

memset(&cmd, 0, sizeof(cmd));
cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);

return __iommu_queue_command(iommu, &cmd);
}

/*
* This function is called whenever we need to ensure that the IOMMU has
* completed execution of all commands we sent. It sends a
* COMPLETION_WAIT command and waits for it to finish. The IOMMU informs
* us about that by writing a value to a physical address we pass with
* the command.
*/
static int iommu_completion_wait(struct amd_iommu *iommu)
{
int ret = 0;
unsigned long flags;

spin_lock_irqsave(&iommu->lock, flags);

if (!iommu->need_sync)
goto out;

ret = __iommu_completion_wait(iommu);

iommu->need_sync = false;

if (ret)
goto out;

__iommu_wait_for_completion(iommu);

out:
spin_unlock_irqrestore(&iommu->lock, flags);
@@ -264,6 +358,21 @@ static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
return ret;
}

static void __iommu_build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
u16 domid, int pde, int s)
{
memset(cmd, 0, sizeof(*cmd));
address &= PAGE_MASK;
CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
cmd->data[1] |= domid;
cmd->data[2] = lower_32_bits(address);
cmd->data[3] = upper_32_bits(address);
if (s) /* size bit - we flush more than one 4kb page */
cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
if (pde) /* PDE bit - we wan't flush everything not only the PTEs */
cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
}

/*
* Generic command send function for invalidaing TLB entries
*/
@@ -273,16 +382,7 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
struct iommu_cmd cmd;
int ret;

memset(&cmd, 0, sizeof(cmd));
address &= PAGE_MASK;
CMD_SET_TYPE(&cmd, CMD_INV_IOMMU_PAGES);
cmd.data[1] |= domid;
cmd.data[2] = lower_32_bits(address);
cmd.data[3] = upper_32_bits(address);
if (s) /* size bit - we flush more than one 4kb page */
cmd.data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
if (pde) /* PDE bit - we wan't flush everything not only the PTEs */
cmd.data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
__iommu_build_inv_iommu_pages(&cmd, address, domid, pde, s);

ret = iommu_queue_command(iommu, &cmd);

@@ -321,9 +421,35 @@ static void iommu_flush_tlb(struct amd_iommu *iommu, u16 domid)
{
u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;

INC_STATS_COUNTER(domain_flush_single);

iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 1);
}

/*
* This function is used to flush the IO/TLB for a given protection domain
* on every IOMMU in the system
*/
static void iommu_flush_domain(u16 domid)
{
unsigned long flags;
struct amd_iommu *iommu;
struct iommu_cmd cmd;

INC_STATS_COUNTER(domain_flush_all);

__iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
domid, 1, 1);

list_for_each_entry(iommu, &amd_iommu_list, list) {
spin_lock_irqsave(&iommu->lock, flags);
__iommu_queue_command(iommu, &cmd);
__iommu_completion_wait(iommu);
__iommu_wait_for_completion(iommu);
spin_unlock_irqrestore(&iommu->lock, flags);
}
}

/****************************************************************************
*
* The functions below are used the create the page table mappings for
@@ -338,10 +464,10 @@ static void iommu_flush_tlb(struct amd_iommu *iommu, u16 domid)
* supporting all features of AMD IOMMU page tables like level skipping
* and full 64 bit address spaces.
*/
static int iommu_map(struct protection_domain *dom,
unsigned long bus_addr,
unsigned long phys_addr,
int prot)
static int iommu_map_page(struct protection_domain *dom,
unsigned long bus_addr,
unsigned long phys_addr,
int prot)
{
u64 __pte, *pte, *page;

@@ -388,6 +514,28 @@ static int iommu_map(struct protection_domain *dom,
return 0;
}

static void iommu_unmap_page(struct protection_domain *dom,
unsigned long bus_addr)
{
u64 *pte;

pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(bus_addr)];

if (!IOMMU_PTE_PRESENT(*pte))
return;

pte = IOMMU_PTE_PAGE(*pte);
pte = &pte[IOMMU_PTE_L1_INDEX(bus_addr)];

if (!IOMMU_PTE_PRESENT(*pte))
return;

pte = IOMMU_PTE_PAGE(*pte);
pte = &pte[IOMMU_PTE_L1_INDEX(bus_addr)];

*pte = 0;
}

/*
* This function checks if a specific unity mapping entry is needed for
* this specific IOMMU.
@@ -440,7 +588,7 @@ static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,

for (addr = e->address_start; addr < e->address_end;
addr += PAGE_SIZE) {
ret = iommu_map(&dma_dom->domain, addr, addr, e->prot);
ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot);
if (ret)
return ret;
/*
@@ -571,6 +719,16 @@ static u16 domain_id_alloc(void)
return id;
}

static void domain_id_free(int id)
{
unsigned long flags;

write_lock_irqsave(&amd_iommu_devtable_lock, flags);
if (id > 0 && id < MAX_DOMAIN_ID)
__clear_bit(id, amd_iommu_pd_alloc_bitmap);
write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
}

/*
* Used to reserve address ranges in the aperture (e.g. for exclusion
* ranges.
@@ -587,12 +745,12 @@ static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
iommu_area_reserve(dom->bitmap, start_page, pages);
}

static void dma_ops_free_pagetable(struct dma_ops_domain *dma_dom)
static void free_pagetable(struct protection_domain *domain)
{
int i, j;
u64 *p1, *p2, *p3;

p1 = dma_dom->domain.pt_root;
p1 = domain->pt_root;

if (!p1)
return;
@@ -613,6 +771,8 @@ static void dma_ops_free_pagetable(struct dma_ops_domain *dma_dom)
}

free_page((unsigned long)p1);

domain->pt_root = NULL;
}
/*
|
||||
@@ -624,7 +784,7 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom)
|
||||
if (!dom)
|
||||
return;
|
||||
|
||||
dma_ops_free_pagetable(dom);
|
||||
free_pagetable(&dom->domain);
|
||||
|
||||
kfree(dom->pte_pages);
|
||||
|
||||
@@ -663,6 +823,7 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
|
||||
goto free_dma_dom;
|
||||
dma_dom->domain.mode = PAGE_MODE_3_LEVEL;
|
||||
dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
|
||||
dma_dom->domain.flags = PD_DMA_OPS_MASK;
|
||||
dma_dom->domain.priv = dma_dom;
|
||||
if (!dma_dom->domain.pt_root)
|
||||
goto free_dma_dom;
|
||||
@@ -724,6 +885,15 @@ free_dma_dom:
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* little helper function to check whether a given protection domain is a
|
||||
* dma_ops domain
|
||||
*/
|
||||
static bool dma_ops_domain(struct protection_domain *domain)
|
||||
{
|
||||
return domain->flags & PD_DMA_OPS_MASK;
|
||||
}
|
||||
|
||||
/*
|
||||
* Find out the protection domain structure for a given PCI device. This
|
||||
* will give us the pointer to the page table root for example.
|
||||
@@ -744,14 +914,15 @@ static struct protection_domain *domain_for_device(u16 devid)
|
||||
* If a device is not yet associated with a domain, this function does
|
||||
* assigns it visible for the hardware
|
||||
*/
|
||||
static void set_device_domain(struct amd_iommu *iommu,
|
||||
struct protection_domain *domain,
|
||||
u16 devid)
|
||||
static void attach_device(struct amd_iommu *iommu,
|
||||
struct protection_domain *domain,
|
||||
u16 devid)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
u64 pte_root = virt_to_phys(domain->pt_root);
|
||||
|
||||
domain->dev_cnt += 1;
|
||||
|
||||
pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
|
||||
<< DEV_ENTRY_MODE_SHIFT;
|
||||
pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
|
||||
@@ -767,6 +938,116 @@ static void set_device_domain(struct amd_iommu *iommu,
|
||||
iommu_queue_inv_dev_entry(iommu, devid);
|
||||
}
|
||||
|
||||
/*
|
||||
* Removes a device from a protection domain (unlocked)
|
||||
*/
|
||||
static void __detach_device(struct protection_domain *domain, u16 devid)
|
||||
{
|
||||
|
||||
/* lock domain */
|
||||
spin_lock(&domain->lock);
|
||||
|
||||
/* remove domain from the lookup table */
|
||||
amd_iommu_pd_table[devid] = NULL;
|
||||
|
||||
/* remove entry from the device table seen by the hardware */
|
||||
amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
|
||||
amd_iommu_dev_table[devid].data[1] = 0;
|
||||
amd_iommu_dev_table[devid].data[2] = 0;
|
||||
|
||||
/* decrease reference counter */
|
||||
domain->dev_cnt -= 1;
|
||||
|
||||
/* ready */
|
||||
spin_unlock(&domain->lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* Removes a device from a protection domain (with devtable_lock held)
|
||||
*/
|
||||
static void detach_device(struct protection_domain *domain, u16 devid)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
/* lock device table */
|
||||
write_lock_irqsave(&amd_iommu_devtable_lock, flags);
|
||||
__detach_device(domain, devid);
|
||||
write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
|
||||
}
|
||||
|
||||
static int device_change_notifier(struct notifier_block *nb,
|
||||
unsigned long action, void *data)
|
||||
{
|
||||
struct device *dev = data;
|
||||
struct pci_dev *pdev = to_pci_dev(dev);
|
||||
u16 devid = calc_devid(pdev->bus->number, pdev->devfn);
|
||||
struct protection_domain *domain;
|
||||
struct dma_ops_domain *dma_domain;
|
||||
struct amd_iommu *iommu;
|
||||
int order = amd_iommu_aperture_order;
|
||||
unsigned long flags;
|
||||
|
||||
if (devid > amd_iommu_last_bdf)
|
||||
goto out;
|
||||
|
||||
devid = amd_iommu_alias_table[devid];
|
||||
|
||||
iommu = amd_iommu_rlookup_table[devid];
|
||||
if (iommu == NULL)
|
||||
goto out;
|
||||
|
||||
domain = domain_for_device(devid);
|
||||
|
||||
if (domain && !dma_ops_domain(domain))
|
||||
WARN_ONCE(1, "AMD IOMMU WARNING: device %s already bound "
|
||||
"to a non-dma-ops domain\n", dev_name(dev));
|
||||
|
||||
switch (action) {
|
||||
case BUS_NOTIFY_BOUND_DRIVER:
|
||||
if (domain)
|
||||
goto out;
|
||||
dma_domain = find_protection_domain(devid);
|
||||
if (!dma_domain)
|
||||
dma_domain = iommu->default_dom;
|
||||
attach_device(iommu, &dma_domain->domain, devid);
|
||||
printk(KERN_INFO "AMD IOMMU: Using protection domain %d for "
|
||||
"device %s\n", dma_domain->domain.id, dev_name(dev));
|
||||
break;
|
||||
case BUS_NOTIFY_UNBIND_DRIVER:
|
||||
if (!domain)
|
||||
goto out;
|
||||
detach_device(domain, devid);
|
||||
break;
|
||||
case BUS_NOTIFY_ADD_DEVICE:
|
||||
/* allocate a protection domain if a device is added */
|
||||
dma_domain = find_protection_domain(devid);
|
||||
if (dma_domain)
|
||||
goto out;
|
||||
dma_domain = dma_ops_domain_alloc(iommu, order);
|
||||
if (!dma_domain)
|
||||
goto out;
|
||||
dma_domain->target_dev = devid;
|
||||
|
||||
spin_lock_irqsave(&iommu_pd_list_lock, flags);
|
||||
list_add_tail(&dma_domain->list, &iommu_pd_list);
|
||||
spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
|
||||
|
||||
break;
|
||||
default:
|
||||
goto out;
|
||||
}
|
||||
|
||||
iommu_queue_inv_dev_entry(iommu, devid);
|
||||
iommu_completion_wait(iommu);
|
||||
|
||||
out:
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct notifier_block device_nb = {
|
||||
.notifier_call = device_change_notifier,
|
||||
};
|
||||
|
||||
/*****************************************************************************
|
||||
*
|
||||
* The next functions belong to the dma_ops mapping/unmapping code.
|
||||
@@ -802,7 +1083,6 @@ static struct dma_ops_domain *find_protection_domain(u16 devid)
|
||||
list_for_each_entry(entry, &iommu_pd_list, list) {
|
||||
if (entry->target_dev == devid) {
|
||||
ret = entry;
|
||||
list_del(&ret->list);
|
||||
break;
|
||||
}
|
||||
}
|
||||
@@ -853,14 +1133,13 @@ static int get_device_resources(struct device *dev,
|
||||
if (!dma_dom)
|
||||
dma_dom = (*iommu)->default_dom;
|
||||
*domain = &dma_dom->domain;
|
||||
set_device_domain(*iommu, *domain, *bdf);
|
||||
attach_device(*iommu, *domain, *bdf);
|
||||
printk(KERN_INFO "AMD IOMMU: Using protection domain %d for "
|
||||
"device ", (*domain)->id);
|
||||
print_devid(_bdf, 1);
|
||||
"device %s\n", (*domain)->id, dev_name(dev));
|
||||
}
|
||||
|
||||
if (domain_for_device(_bdf) == NULL)
|
||||
set_device_domain(*iommu, *domain, _bdf);
|
||||
attach_device(*iommu, *domain, _bdf);
|
||||
|
||||
return 1;
|
||||
}
|
||||
@@ -946,6 +1225,11 @@ static dma_addr_t __map_single(struct device *dev,
|
||||
pages = iommu_num_pages(paddr, size, PAGE_SIZE);
|
||||
paddr &= PAGE_MASK;
|
||||
|
||||
INC_STATS_COUNTER(total_map_requests);
|
||||
|
||||
if (pages > 1)
|
||||
INC_STATS_COUNTER(cross_page);
|
||||
|
||||
if (align)
|
||||
align_mask = (1UL << get_order(size)) - 1;
|
||||
|
||||
@@ -962,6 +1246,8 @@ static dma_addr_t __map_single(struct device *dev,
|
||||
}
|
||||
address += offset;
|
||||
|
||||
ADD_STATS_COUNTER(alloced_io_mem, size);
|
||||
|
||||
if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
|
||||
iommu_flush_tlb(iommu, dma_dom->domain.id);
|
||||
dma_dom->need_flush = false;
|
||||
@@ -998,6 +1284,8 @@ static void __unmap_single(struct amd_iommu *iommu,
|
||||
start += PAGE_SIZE;
|
||||
}
|
||||
|
||||
SUB_STATS_COUNTER(alloced_io_mem, size);
|
||||
|
||||
dma_ops_free_addresses(dma_dom, dma_addr, pages);
|
||||
|
||||
if (amd_iommu_unmap_flush || dma_dom->need_flush) {
|
||||
@@ -1019,6 +1307,8 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
|
||||
dma_addr_t addr;
|
||||
u64 dma_mask;
|
||||
|
||||
INC_STATS_COUNTER(cnt_map_single);
|
||||
|
||||
if (!check_device(dev))
|
||||
return bad_dma_address;
|
||||
|
||||
@@ -1030,6 +1320,9 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
|
||||
/* device not handled by any AMD IOMMU */
|
||||
return (dma_addr_t)paddr;
|
||||
|
||||
if (!dma_ops_domain(domain))
|
||||
return bad_dma_address;
|
||||
|
||||
spin_lock_irqsave(&domain->lock, flags);
|
||||
addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, false,
|
||||
dma_mask);
|
||||
@@ -1055,11 +1348,16 @@ static void unmap_single(struct device *dev, dma_addr_t dma_addr,
|
||||
struct protection_domain *domain;
|
||||
u16 devid;
|
||||
|
||||
INC_STATS_COUNTER(cnt_unmap_single);
|
||||
|
||||
if (!check_device(dev) ||
|
||||
!get_device_resources(dev, &iommu, &domain, &devid))
|
||||
/* device not handled by any AMD IOMMU */
|
||||
return;
|
||||
|
||||
if (!dma_ops_domain(domain))
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&domain->lock, flags);
|
||||
|
||||
__unmap_single(iommu, domain->priv, dma_addr, size, dir);
|
||||
@@ -1104,6 +1402,8 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
|
||||
int mapped_elems = 0;
|
||||
u64 dma_mask;
|
||||
|
||||
INC_STATS_COUNTER(cnt_map_sg);
|
||||
|
||||
if (!check_device(dev))
|
||||
return 0;
|
||||
|
||||
@@ -1114,6 +1414,9 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
|
||||
if (!iommu || !domain)
|
||||
return map_sg_no_iommu(dev, sglist, nelems, dir);
|
||||
|
||||
if (!dma_ops_domain(domain))
|
||||
return 0;
|
||||
|
||||
spin_lock_irqsave(&domain->lock, flags);
|
||||
|
||||
for_each_sg(sglist, s, nelems, i) {
|
||||
@@ -1163,10 +1466,15 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
|
||||
u16 devid;
|
||||
int i;
|
||||
|
||||
INC_STATS_COUNTER(cnt_unmap_sg);
|
||||
|
||||
if (!check_device(dev) ||
|
||||
!get_device_resources(dev, &iommu, &domain, &devid))
|
||||
return;
|
||||
|
||||
if (!dma_ops_domain(domain))
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&domain->lock, flags);
|
||||
|
||||
for_each_sg(sglist, s, nelems, i) {
|
||||
@@ -1194,6 +1502,8 @@ static void *alloc_coherent(struct device *dev, size_t size,
|
||||
phys_addr_t paddr;
|
||||
u64 dma_mask = dev->coherent_dma_mask;
|
||||
|
||||
INC_STATS_COUNTER(cnt_alloc_coherent);
|
||||
|
||||
if (!check_device(dev))
|
||||
return NULL;
|
||||
|
||||
@@ -1212,6 +1522,9 @@ static void *alloc_coherent(struct device *dev, size_t size,
|
||||
return virt_addr;
|
||||
}
|
||||
|
||||
if (!dma_ops_domain(domain))
|
||||
goto out_free;
|
||||
|
||||
if (!dma_mask)
|
||||
dma_mask = *dev->dma_mask;
|
||||
|
||||
@@ -1220,18 +1533,20 @@ static void *alloc_coherent(struct device *dev, size_t size,
|
||||
*dma_addr = __map_single(dev, iommu, domain->priv, paddr,
|
||||
size, DMA_BIDIRECTIONAL, true, dma_mask);
|
||||
|
||||
if (*dma_addr == bad_dma_address) {
|
||||
free_pages((unsigned long)virt_addr, get_order(size));
|
||||
virt_addr = NULL;
|
||||
goto out;
|
||||
}
|
||||
if (*dma_addr == bad_dma_address)
|
||||
goto out_free;
|
||||
|
||||
iommu_completion_wait(iommu);
|
||||
|
||||
out:
|
||||
spin_unlock_irqrestore(&domain->lock, flags);
|
||||
|
||||
return virt_addr;
|
||||
|
||||
out_free:
|
||||
|
||||
free_pages((unsigned long)virt_addr, get_order(size));
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -1245,6 +1560,8 @@ static void free_coherent(struct device *dev, size_t size,
|
||||
struct protection_domain *domain;
|
||||
u16 devid;
|
||||
|
||||
INC_STATS_COUNTER(cnt_free_coherent);
|
||||
|
||||
if (!check_device(dev))
|
||||
return;
|
||||
|
||||
@@ -1253,6 +1570,9 @@ static void free_coherent(struct device *dev, size_t size,
|
||||
if (!iommu || !domain)
|
||||
goto free_mem;
|
||||
|
||||
if (!dma_ops_domain(domain))
|
||||
goto free_mem;
|
||||
|
||||
spin_lock_irqsave(&domain->lock, flags);
|
||||
|
||||
__unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
|
||||
@@ -1296,7 +1616,7 @@ static int amd_iommu_dma_supported(struct device *dev, u64 mask)
|
||||
* we don't need to preallocate the protection domains anymore.
|
||||
* For now we have to.
|
||||
*/
|
||||
void prealloc_protection_domains(void)
|
||||
static void prealloc_protection_domains(void)
|
||||
{
|
||||
struct pci_dev *dev = NULL;
|
||||
struct dma_ops_domain *dma_dom;
|
||||
@@ -1305,7 +1625,7 @@ void prealloc_protection_domains(void)
|
||||
u16 devid;
|
||||
|
||||
while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
|
||||
devid = (dev->bus->number << 8) | dev->devfn;
|
||||
devid = calc_devid(dev->bus->number, dev->devfn);
|
||||
if (devid > amd_iommu_last_bdf)
|
||||
continue;
|
||||
devid = amd_iommu_alias_table[devid];
|
||||
@@ -1352,6 +1672,7 @@ int __init amd_iommu_init_dma_ops(void)
|
||||
iommu->default_dom = dma_ops_domain_alloc(iommu, order);
|
||||
if (iommu->default_dom == NULL)
|
||||
return -ENOMEM;
|
||||
iommu->default_dom->domain.flags |= PD_DEFAULT_MASK;
|
||||
ret = iommu_init_unity_mappings(iommu);
|
||||
if (ret)
|
||||
goto free_domains;
|
||||
@@ -1375,6 +1696,12 @@ int __init amd_iommu_init_dma_ops(void)
|
||||
/* Make the driver finally visible to the drivers */
|
||||
dma_ops = &amd_iommu_dma_ops;
|
||||
|
||||
register_iommu(&amd_iommu_ops);
|
||||
|
||||
bus_register_notifier(&pci_bus_type, &device_nb);
|
||||
|
||||
amd_iommu_stats_init();
|
||||
|
||||
return 0;
|
||||
|
||||
free_domains:
|
||||
@@ -1386,3 +1713,224 @@ free_domains:
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*****************************************************************************
|
||||
*
|
||||
* The following functions belong to the exported interface of AMD IOMMU
|
||||
*
|
||||
* This interface allows access to lower level functions of the IOMMU
|
||||
* like protection domain handling and assignement of devices to domains
|
||||
* which is not possible with the dma_ops interface.
|
||||
*
|
||||
*****************************************************************************/
|
||||
|
||||
static void cleanup_domain(struct protection_domain *domain)
|
||||
{
|
||||
unsigned long flags;
|
||||
u16 devid;
|
||||
|
||||
write_lock_irqsave(&amd_iommu_devtable_lock, flags);
|
||||
|
||||
for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
|
||||
if (amd_iommu_pd_table[devid] == domain)
|
||||
__detach_device(domain, devid);
|
||||
|
||||
write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
|
||||
}
|
||||
|
||||
static int amd_iommu_domain_init(struct iommu_domain *dom)
|
||||
{
|
||||
struct protection_domain *domain;
|
||||
|
||||
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
|
||||
if (!domain)
|
||||
return -ENOMEM;
|
||||
|
||||
spin_lock_init(&domain->lock);
|
||||
domain->mode = PAGE_MODE_3_LEVEL;
|
||||
domain->id = domain_id_alloc();
|
||||
if (!domain->id)
|
||||
goto out_free;
|
||||
domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
|
||||
if (!domain->pt_root)
|
||||
goto out_free;
|
||||
|
||||
dom->priv = domain;
|
||||
|
||||
return 0;
|
||||
|
||||
out_free:
|
||||
kfree(domain);
|
||||
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
static void amd_iommu_domain_destroy(struct iommu_domain *dom)
|
||||
{
|
||||
struct protection_domain *domain = dom->priv;
|
||||
|
||||
if (!domain)
|
||||
return;
|
||||
|
||||
if (domain->dev_cnt > 0)
|
||||
cleanup_domain(domain);
|
||||
|
||||
BUG_ON(domain->dev_cnt != 0);
|
||||
|
||||
free_pagetable(domain);
|
||||
|
||||
domain_id_free(domain->id);
|
||||
|
||||
kfree(domain);
|
||||
|
||||
dom->priv = NULL;
|
||||
}
|
||||
|
||||
static void amd_iommu_detach_device(struct iommu_domain *dom,
|
||||
struct device *dev)
|
||||
{
|
||||
struct protection_domain *domain = dom->priv;
|
||||
struct amd_iommu *iommu;
|
||||
struct pci_dev *pdev;
|
||||
u16 devid;
|
||||
|
||||
if (dev->bus != &pci_bus_type)
|
||||
return;
|
||||
|
||||
pdev = to_pci_dev(dev);
|
||||
|
||||
devid = calc_devid(pdev->bus->number, pdev->devfn);
|
||||
|
||||
if (devid > 0)
|
||||
detach_device(domain, devid);
|
||||
|
||||
iommu = amd_iommu_rlookup_table[devid];
|
||||
if (!iommu)
|
||||
return;
|
||||
|
||||
iommu_queue_inv_dev_entry(iommu, devid);
|
||||
iommu_completion_wait(iommu);
|
||||
}
|
||||
|
||||
static int amd_iommu_attach_device(struct iommu_domain *dom,
|
||||
struct device *dev)
|
||||
{
|
||||
struct protection_domain *domain = dom->priv;
|
||||
struct protection_domain *old_domain;
|
||||
struct amd_iommu *iommu;
|
||||
struct pci_dev *pdev;
|
||||
u16 devid;
|
||||
|
||||
if (dev->bus != &pci_bus_type)
|
||||
return -EINVAL;
|
||||
|
||||
pdev = to_pci_dev(dev);
|
||||
|
||||
devid = calc_devid(pdev->bus->number, pdev->devfn);
|
||||
|
||||
if (devid >= amd_iommu_last_bdf ||
|
||||
devid != amd_iommu_alias_table[devid])
|
||||
return -EINVAL;
|
||||
|
||||
iommu = amd_iommu_rlookup_table[devid];
|
||||
if (!iommu)
|
||||
return -EINVAL;
|
||||
|
||||
old_domain = domain_for_device(devid);
|
||||
if (old_domain)
|
||||
return -EBUSY;
|
||||
|
||||
attach_device(iommu, domain, devid);
|
||||
|
||||
iommu_completion_wait(iommu);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int amd_iommu_map_range(struct iommu_domain *dom,
|
||||
unsigned long iova, phys_addr_t paddr,
|
||||
size_t size, int iommu_prot)
|
||||
{
|
||||
struct protection_domain *domain = dom->priv;
|
||||
unsigned long i, npages = iommu_num_pages(paddr, size, PAGE_SIZE);
|
||||
int prot = 0;
|
||||
int ret;
|
||||
|
||||
if (iommu_prot & IOMMU_READ)
|
||||
prot |= IOMMU_PROT_IR;
|
||||
if (iommu_prot & IOMMU_WRITE)
|
||||
prot |= IOMMU_PROT_IW;
|
||||
|
||||
iova &= PAGE_MASK;
|
||||
paddr &= PAGE_MASK;
|
||||
|
||||
for (i = 0; i < npages; ++i) {
|
||||
ret = iommu_map_page(domain, iova, paddr, prot);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
iova += PAGE_SIZE;
|
||||
paddr += PAGE_SIZE;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void amd_iommu_unmap_range(struct iommu_domain *dom,
|
||||
unsigned long iova, size_t size)
|
||||
{
|
||||
|
||||
struct protection_domain *domain = dom->priv;
|
||||
unsigned long i, npages = iommu_num_pages(iova, size, PAGE_SIZE);
|
||||
|
||||
iova &= PAGE_MASK;
|
||||
|
||||
for (i = 0; i < npages; ++i) {
|
||||
iommu_unmap_page(domain, iova);
|
||||
iova += PAGE_SIZE;
|
||||
}
|
||||
|
||||
iommu_flush_domain(domain->id);
|
||||
}
|
||||
|
||||
static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
|
||||
unsigned long iova)
|
||||
{
|
||||
struct protection_domain *domain = dom->priv;
|
||||
unsigned long offset = iova & ~PAGE_MASK;
|
||||
phys_addr_t paddr;
|
||||
u64 *pte;
|
||||
|
||||
pte = &domain->pt_root[IOMMU_PTE_L2_INDEX(iova)];
|
||||
|
||||
if (!IOMMU_PTE_PRESENT(*pte))
|
||||
return 0;
|
||||
|
||||
pte = IOMMU_PTE_PAGE(*pte);
|
||||
pte = &pte[IOMMU_PTE_L1_INDEX(iova)];
|
||||
|
||||
if (!IOMMU_PTE_PRESENT(*pte))
|
||||
return 0;
|
||||
|
||||
pte = IOMMU_PTE_PAGE(*pte);
|
||||
pte = &pte[IOMMU_PTE_L0_INDEX(iova)];
|
||||
|
||||
if (!IOMMU_PTE_PRESENT(*pte))
|
||||
return 0;
|
||||
|
||||
paddr = *pte & IOMMU_PAGE_MASK;
|
||||
paddr |= offset;
|
||||
|
||||
return paddr;
|
||||
}
|
||||
|
||||
static struct iommu_ops amd_iommu_ops = {
|
||||
.domain_init = amd_iommu_domain_init,
|
||||
.domain_destroy = amd_iommu_domain_destroy,
|
||||
.attach_dev = amd_iommu_attach_device,
|
||||
.detach_dev = amd_iommu_detach_device,
|
||||
.map = amd_iommu_map_range,
|
||||
.unmap = amd_iommu_unmap_range,
|
||||
.iova_to_phys = amd_iommu_iova_to_phys,
|
||||
};
|
||||
|
||||
|
@@ -122,7 +122,8 @@ u16 amd_iommu_last_bdf; /* largest PCI device id we have
|
||||
LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings
|
||||
we find in ACPI */
|
||||
unsigned amd_iommu_aperture_order = 26; /* size of aperture in power of 2 */
|
||||
int amd_iommu_isolate = 1; /* if 1, device isolation is enabled */
|
||||
bool amd_iommu_isolate = true; /* if true, device isolation is
|
||||
enabled */
|
||||
bool amd_iommu_unmap_flush; /* if true, flush on every unmap */
|
||||
|
||||
LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
|
||||
@@ -243,20 +244,16 @@ static void __init iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
|
||||
}
|
||||
|
||||
/* Function to enable the hardware */
|
||||
void __init iommu_enable(struct amd_iommu *iommu)
|
||||
static void __init iommu_enable(struct amd_iommu *iommu)
|
||||
{
|
||||
printk(KERN_INFO "AMD IOMMU: Enabling IOMMU "
|
||||
"at %02x:%02x.%x cap 0x%hx\n",
|
||||
iommu->dev->bus->number,
|
||||
PCI_SLOT(iommu->dev->devfn),
|
||||
PCI_FUNC(iommu->dev->devfn),
|
||||
iommu->cap_ptr);
|
||||
printk(KERN_INFO "AMD IOMMU: Enabling IOMMU at %s cap 0x%hx\n",
|
||||
dev_name(&iommu->dev->dev), iommu->cap_ptr);
|
||||
|
||||
iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
|
||||
}
|
||||
|
||||
/* Function to enable IOMMU event logging and event interrupts */
|
||||
void __init iommu_enable_event_logging(struct amd_iommu *iommu)
|
||||
static void __init iommu_enable_event_logging(struct amd_iommu *iommu)
|
||||
{
|
||||
iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
|
||||
iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
|
||||
@@ -1218,9 +1215,9 @@ static int __init parse_amd_iommu_options(char *str)
|
||||
{
|
||||
for (; *str; ++str) {
|
||||
if (strncmp(str, "isolate", 7) == 0)
|
||||
amd_iommu_isolate = 1;
|
||||
amd_iommu_isolate = true;
|
||||
if (strncmp(str, "share", 5) == 0)
|
||||
amd_iommu_isolate = 0;
|
||||
amd_iommu_isolate = false;
|
||||
if (strncmp(str, "fullflush", 9) == 0)
|
||||
amd_iommu_unmap_flush = true;
|
||||
}
|
||||
|
@@ -98,8 +98,8 @@ __setup("apicpmtimer", setup_apicpmtimer);
|
||||
#ifdef HAVE_X2APIC
|
||||
int x2apic;
|
||||
/* x2apic enabled before OS handover */
|
||||
int x2apic_preenabled;
|
||||
int disable_x2apic;
|
||||
static int x2apic_preenabled;
|
||||
static int disable_x2apic;
|
||||
static __init int setup_nox2apic(char *str)
|
||||
{
|
||||
disable_x2apic = 1;
|
||||
@@ -140,7 +140,7 @@ static int lapic_next_event(unsigned long delta,
|
||||
struct clock_event_device *evt);
|
||||
static void lapic_timer_setup(enum clock_event_mode mode,
|
||||
struct clock_event_device *evt);
|
||||
static void lapic_timer_broadcast(const cpumask_t *mask);
|
||||
static void lapic_timer_broadcast(const struct cpumask *mask);
|
||||
static void apic_pm_activate(void);
|
||||
|
||||
/*
|
||||
@@ -226,7 +226,7 @@ void xapic_icr_write(u32 low, u32 id)
|
||||
apic_write(APIC_ICR, low);
|
||||
}
|
||||
|
||||
u64 xapic_icr_read(void)
|
||||
static u64 xapic_icr_read(void)
|
||||
{
|
||||
u32 icr1, icr2;
|
||||
|
||||
@@ -266,7 +266,7 @@ void x2apic_icr_write(u32 low, u32 id)
|
||||
wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low);
|
||||
}
|
||||
|
||||
u64 x2apic_icr_read(void)
|
||||
static u64 x2apic_icr_read(void)
|
||||
{
|
||||
unsigned long val;
|
||||
|
||||
@@ -453,7 +453,7 @@ static void lapic_timer_setup(enum clock_event_mode mode,
|
||||
/*
|
||||
* Local APIC timer broadcast function
|
||||
*/
|
||||
static void lapic_timer_broadcast(const cpumask_t *mask)
|
||||
static void lapic_timer_broadcast(const struct cpumask *mask)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
|
||||
|
@@ -25,7 +25,7 @@
|
||||
#include <asm/uv/bios.h>
|
||||
#include <asm/uv/uv_hub.h>
|
||||
|
||||
struct uv_systab uv_systab;
|
||||
static struct uv_systab uv_systab;
|
||||
|
||||
s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
|
||||
{
|
||||
|
@@ -355,7 +355,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
|
||||
printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
|
||||
} else if (smp_num_siblings > 1) {
|
||||
|
||||
if (smp_num_siblings > NR_CPUS) {
|
||||
if (smp_num_siblings > nr_cpu_ids) {
|
||||
printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
|
||||
smp_num_siblings);
|
||||
smp_num_siblings = 1;
|
||||
|
@@ -517,6 +517,17 @@ acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
|
||||
}
|
||||
}
|
||||
|
||||
static void free_acpi_perf_data(void)
|
||||
{
|
||||
unsigned int i;
|
||||
|
||||
/* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
|
||||
for_each_possible_cpu(i)
|
||||
free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
|
||||
->shared_cpu_map);
|
||||
free_percpu(acpi_perf_data);
|
||||
}
|
||||
|
||||
/*
|
||||
* acpi_cpufreq_early_init - initialize ACPI P-States library
|
||||
*
|
||||
@@ -527,6 +538,7 @@ acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
|
||||
*/
|
||||
static int __init acpi_cpufreq_early_init(void)
|
||||
{
|
||||
unsigned int i;
|
||||
dprintk("acpi_cpufreq_early_init\n");
|
||||
|
||||
acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
|
||||
@@ -534,6 +546,16 @@ static int __init acpi_cpufreq_early_init(void)
|
||||
dprintk("Memory allocation error for acpi_perf_data.\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
for_each_possible_cpu(i) {
|
||||
if (!alloc_cpumask_var_node(
|
||||
&per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
|
||||
GFP_KERNEL, cpu_to_node(i))) {
|
||||
|
||||
/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
|
||||
free_acpi_perf_data();
|
||||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
|
||||
/* Do initialization in ACPI core */
|
||||
acpi_processor_preregister_performance(acpi_perf_data);
|
||||
@@ -604,9 +626,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
|
||||
*/
|
||||
if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
|
||||
policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
|
||||
policy->cpus = perf->shared_cpu_map;
|
||||
cpumask_copy(&policy->cpus, perf->shared_cpu_map);
|
||||
}
|
||||
policy->related_cpus = perf->shared_cpu_map;
|
||||
cpumask_copy(&policy->related_cpus, perf->shared_cpu_map);
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
dmi_check_system(sw_any_bug_dmi_table);
|
||||
@@ -795,7 +817,7 @@ static int __init acpi_cpufreq_init(void)
|
||||
|
||||
ret = cpufreq_register_driver(&acpi_cpufreq_driver);
|
||||
if (ret)
|
||||
free_percpu(acpi_perf_data);
|
||||
free_acpi_perf_data();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@@ -310,6 +310,12 @@ static int powernow_acpi_init(void)
|
||||
goto err0;
|
||||
}
|
||||
|
||||
if (!alloc_cpumask_var(&acpi_processor_perf->shared_cpu_map,
|
||||
GFP_KERNEL)) {
|
||||
retval = -ENOMEM;
|
||||
goto err05;
|
||||
}
|
||||
|
||||
if (acpi_processor_register_performance(acpi_processor_perf, 0)) {
|
||||
retval = -EIO;
|
||||
goto err1;
|
||||
@@ -412,6 +418,8 @@ static int powernow_acpi_init(void)
|
||||
err2:
|
||||
acpi_processor_unregister_performance(acpi_processor_perf, 0);
|
||||
err1:
|
||||
free_cpumask_var(acpi_processor_perf->shared_cpu_map);
|
||||
err05:
|
||||
kfree(acpi_processor_perf);
|
||||
err0:
|
||||
printk(KERN_WARNING PFX "ACPI perflib can not be used in this platform\n");
|
||||
@@ -652,6 +660,7 @@ static int powernow_cpu_exit (struct cpufreq_policy *policy) {
|
||||
#ifdef CONFIG_X86_POWERNOW_K7_ACPI
|
||||
if (acpi_processor_perf) {
|
||||
acpi_processor_unregister_performance(acpi_processor_perf, 0);
|
||||
free_cpumask_var(acpi_processor_perf->shared_cpu_map);
|
||||
kfree(acpi_processor_perf);
|
||||
}
|
||||
#endif
|
||||
|
@@ -766,7 +766,7 @@ static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned
|
||||
static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
|
||||
{
|
||||
struct cpufreq_frequency_table *powernow_table;
|
||||
int ret_val;
|
||||
int ret_val = -ENODEV;
|
||||
|
||||
if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) {
|
||||
dprintk("register performance failed: bad ACPI data\n");
|
||||
@@ -815,6 +815,13 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
|
||||
/* notify BIOS that we exist */
|
||||
acpi_processor_notify_smm(THIS_MODULE);
|
||||
|
||||
if (!alloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) {
|
||||
printk(KERN_ERR PFX
|
||||
"unable to alloc powernow_k8_data cpumask\n");
|
||||
ret_val = -ENOMEM;
|
||||
goto err_out_mem;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err_out_mem:
|
||||
@@ -826,7 +833,7 @@ err_out:
|
||||
/* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */
|
||||
data->acpi_data.state_count = 0;
|
||||
|
||||
return -ENODEV;
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table)
|
||||
@@ -929,6 +936,7 @@ static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
|
||||
{
|
||||
if (data->acpi_data.state_count)
|
||||
acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
|
||||
free_cpumask_var(data->acpi_data.shared_cpu_map);
|
||||
}
|
||||
|
||||
#else
|
||||
@@ -1134,7 +1142,8 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
|
||||
data->cpu = pol->cpu;
|
||||
data->currpstate = HW_PSTATE_INVALID;
|
||||
|
||||
if (powernow_k8_cpu_init_acpi(data)) {
|
||||
rc = powernow_k8_cpu_init_acpi(data);
|
||||
if (rc) {
|
||||
/*
|
||||
* Use the PSB BIOS structure. This is only availabe on
|
||||
* an UP version, and is deprecated by AMD.
|
||||
@@ -1152,20 +1161,17 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
|
||||
"ACPI maintainers and complain to your BIOS "
|
||||
"vendor.\n");
|
||||
#endif
|
||||
kfree(data);
|
||||
return -ENODEV;
|
||||
goto err_out;
|
||||
}
|
||||
if (pol->cpu != 0) {
|
||||
printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for "
|
||||
"CPU other than CPU0. Complain to your BIOS "
|
||||
"vendor.\n");
|
||||
kfree(data);
|
||||
return -ENODEV;
|
||||
goto err_out;
|
||||
}
|
||||
rc = find_psb_table(data);
|
||||
if (rc) {
|
||||
kfree(data);
|
||||
return -ENODEV;
|
||||
goto err_out;
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -534,7 +534,7 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
|
||||
per_cpu(cpuid4_info, cpu) = NULL;
|
||||
}
|
||||
|
||||
static void get_cpu_leaves(void *_retval)
|
||||
static void __cpuinit get_cpu_leaves(void *_retval)
|
||||
{
|
||||
int j, *retval = _retval, cpu = smp_processor_id();
|
||||
|
||||
|
@@ -824,16 +824,14 @@ static int enable_mtrr_cleanup __initdata =
|
||||
|
||||
static int __init disable_mtrr_cleanup_setup(char *str)
|
||||
{
|
||||
if (enable_mtrr_cleanup != -1)
|
||||
enable_mtrr_cleanup = 0;
|
||||
enable_mtrr_cleanup = 0;
|
||||
return 0;
|
||||
}
|
||||
early_param("disable_mtrr_cleanup", disable_mtrr_cleanup_setup);
|
||||
|
||||
static int __init enable_mtrr_cleanup_setup(char *str)
|
||||
{
|
||||
if (enable_mtrr_cleanup != -1)
|
||||
enable_mtrr_cleanup = 1;
|
||||
enable_mtrr_cleanup = 1;
|
||||
return 0;
|
||||
}
|
||||
early_param("enable_mtrr_cleanup", enable_mtrr_cleanup_setup);
|
||||
|
@@ -39,10 +39,10 @@
|
||||
#include <linux/device.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/notifier.h>
|
||||
#include <linux/uaccess.h>
|
||||
|
||||
#include <asm/processor.h>
|
||||
#include <asm/msr.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/system.h>
|
||||
|
||||
static struct class *cpuid_class;
|
||||
@@ -82,7 +82,7 @@ static loff_t cpuid_seek(struct file *file, loff_t offset, int orig)
|
||||
}
|
||||
|
||||
static ssize_t cpuid_read(struct file *file, char __user *buf,
|
||||
size_t count, loff_t * ppos)
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
char __user *tmp = buf;
|
||||
struct cpuid_regs cmd;
|
||||
@@ -117,11 +117,11 @@ static int cpuid_open(struct inode *inode, struct file *file)
|
||||
unsigned int cpu;
|
||||
struct cpuinfo_x86 *c;
|
||||
int ret = 0;
|
||||
|
||||
|
||||
lock_kernel();
|
||||
|
||||
cpu = iminor(file->f_path.dentry->d_inode);
|
||||
if (cpu >= NR_CPUS || !cpu_online(cpu)) {
|
||||
if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
|
||||
ret = -ENXIO; /* No such CPU */
|
||||
goto out;
|
||||
}
|
||||
|
@@ -886,7 +886,7 @@ asmlinkage void early_printk(const char *fmt, ...)
|
||||
va_list ap;
|
||||
|
||||
va_start(ap, fmt);
|
||||
n = vscnprintf(buf, 512, fmt, ap);
|
||||
n = vscnprintf(buf, sizeof(buf), fmt, ap);
|
||||
early_console->write(early_console, buf, n);
|
||||
va_end(ap);
|
||||
}
|
||||
|
@@ -161,12 +161,12 @@ static unsigned int phys_pkg_id(int index_msb)
|
||||
return current_cpu_data.initial_apicid >> index_msb;
|
||||
}
|
||||
|
||||
void x2apic_send_IPI_self(int vector)
|
||||
static void x2apic_send_IPI_self(int vector)
|
||||
{
|
||||
apic_write(APIC_SELF_IPI, vector);
|
||||
}
|
||||
|
||||
void init_x2apic_ldr(void)
|
||||
static void init_x2apic_ldr(void)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
@@ -27,7 +27,7 @@
|
||||
#include <asm/trampoline.h>
|
||||
|
||||
/* boot cpu pda */
|
||||
static struct x8664_pda _boot_cpu_pda __read_mostly;
|
||||
static struct x8664_pda _boot_cpu_pda;
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
/*
|
||||
|
@@ -214,11 +214,11 @@ static struct irq_cfg *get_one_free_irq_cfg(int cpu)
|
||||
|
||||
cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
|
||||
if (cfg) {
|
||||
/* FIXME: needs alloc_cpumask_var_node() */
|
||||
if (!alloc_cpumask_var(&cfg->domain, GFP_ATOMIC)) {
|
||||
if (!alloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
|
||||
kfree(cfg);
|
||||
cfg = NULL;
|
||||
} else if (!alloc_cpumask_var(&cfg->old_domain, GFP_ATOMIC)) {
|
||||
} else if (!alloc_cpumask_var_node(&cfg->old_domain,
|
||||
GFP_ATOMIC, node)) {
|
||||
free_cpumask_var(cfg->domain);
|
||||
kfree(cfg);
|
||||
cfg = NULL;
|
||||
@@ -706,7 +706,7 @@ static void __unmask_IO_APIC_irq(struct irq_cfg *cfg)
|
||||
}
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
void io_apic_sync(struct irq_pin_list *entry)
|
||||
static void io_apic_sync(struct irq_pin_list *entry)
|
||||
{
|
||||
/*
|
||||
* Synchronize the IO-APIC and the CPU by doing
|
||||
|
@@ -12,8 +12,8 @@
|
||||
#include <linux/mm.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/uaccess.h>
|
||||
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/ldt.h>
|
||||
#include <asm/desc.h>
|
||||
@@ -93,7 +93,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
for(i = 0; i < old->size; i++)
|
||||
for (i = 0; i < old->size; i++)
|
||||
write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
|
||||
return 0;
|
||||
}
|
||||
|
@@ -13,8 +13,7 @@
|
||||
#include <asm/msr.h>
|
||||
#include <asm/acpi.h>
|
||||
#include <asm/mmconfig.h>
|
||||
|
||||
#include "../pci/pci.h"
|
||||
#include <asm/pci_x86.h>
|
||||
|
||||
struct pci_hostbridge_probe {
|
||||
u32 bus;
|
||||
|
@@ -16,14 +16,14 @@
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/acpi.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/acpi.h>
|
||||
|
||||
#include <asm/smp.h>
|
||||
#include <asm/mtrr.h>
|
||||
#include <asm/mpspec.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/io_apic.h>
|
||||
#include <asm/proto.h>
|
||||
#include <asm/acpi.h>
|
||||
#include <asm/bios_ebda.h>
|
||||
#include <asm/e820.h>
|
||||
#include <asm/trampoline.h>
|
||||
@@ -95,8 +95,8 @@ static void __init MP_bus_info(struct mpc_config_bus *m)
|
||||
#endif
|
||||
|
||||
if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
|
||||
set_bit(m->mpc_busid, mp_bus_not_pci);
|
||||
#if defined(CONFIG_EISA) || defined (CONFIG_MCA)
|
||||
set_bit(m->mpc_busid, mp_bus_not_pci);
|
||||
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
|
||||
mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
|
||||
#endif
|
||||
} else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) {
|
||||
@@ -104,7 +104,7 @@ static void __init MP_bus_info(struct mpc_config_bus *m)
|
||||
x86_quirks->mpc_oem_pci_bus(m);
|
||||
|
||||
clear_bit(m->mpc_busid, mp_bus_not_pci);
|
||||
#if defined(CONFIG_EISA) || defined (CONFIG_MCA)
|
||||
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
|
||||
mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
|
||||
} else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) {
|
||||
mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
|
||||
|
@@ -136,7 +136,7 @@ static int msr_open(struct inode *inode, struct file *file)
|
||||
lock_kernel();
|
||||
cpu = iminor(file->f_path.dentry->d_inode);
|
||||
|
||||
if (cpu >= NR_CPUS || !cpu_online(cpu)) {
|
||||
if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
|
||||
ret = -ENXIO; /* No such CPU */
|
||||
goto out;
|
||||
}
|
||||
|
@@ -26,11 +26,10 @@
|
||||
#include <linux/kernel_stat.h>
|
||||
#include <linux/kdebug.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/nmi.h>
|
||||
|
||||
#include <asm/i8259.h>
|
||||
#include <asm/io_apic.h>
|
||||
#include <asm/smp.h>
|
||||
#include <asm/nmi.h>
|
||||
#include <asm/proto.h>
|
||||
#include <asm/timer.h>
|
||||
|
||||
|
@@ -52,7 +52,7 @@ static u32 *iommu_gatt_base; /* Remapping table */
|
||||
* to trigger bugs with some popular PCI cards, in particular 3ware (but
|
||||
* has been also also seen with Qlogic at least).
|
||||
*/
|
||||
int iommu_fullflush = 1;
|
||||
static int iommu_fullflush = 1;
|
||||
|
||||
/* Allocation bitmap for the remapping area: */
|
||||
static DEFINE_SPINLOCK(iommu_bitmap_lock);
|
||||
|
@@ -12,6 +12,7 @@
|
||||
#include <asm/proto.h>
|
||||
#include <asm/reboot_fixups.h>
|
||||
#include <asm/reboot.h>
|
||||
#include <asm/pci_x86.h>
|
||||
#include <asm/virtext.h>
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
@@ -24,7 +25,6 @@
|
||||
|
||||
#include <mach_ipi.h>
|
||||
|
||||
|
||||
/*
|
||||
* Power off function, if any
|
||||
*/
|
||||
@@ -501,7 +501,7 @@ void native_machine_shutdown(void)
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
/* See if there has been given a command line override */
|
||||
if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) &&
|
||||
if ((reboot_cpu != -1) && (reboot_cpu < nr_cpu_ids) &&
|
||||
cpu_online(reboot_cpu))
|
||||
reboot_cpu_id = reboot_cpu;
|
||||
#endif
|
||||
@@ -511,7 +511,7 @@ void native_machine_shutdown(void)
|
||||
reboot_cpu_id = smp_processor_id();
|
||||
|
||||
/* Make certain I only run on the appropriate processor */
|
||||
set_cpus_allowed_ptr(current, &cpumask_of_cpu(reboot_cpu_id));
|
||||
set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id));
|
||||
|
||||
/* O.K Now that I'm on the appropriate processor,
|
||||
* stop all of the others.
|
||||
|
@@ -153,12 +153,10 @@ void __init setup_per_cpu_areas(void)
|
||||
align = max_t(unsigned long, PAGE_SIZE, align);
|
||||
size = roundup(old_size, align);
|
||||
|
||||
printk(KERN_INFO
|
||||
"NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
|
||||
pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
|
||||
NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
|
||||
|
||||
printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n",
|
||||
size);
|
||||
pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size);
|
||||
|
||||
for_each_possible_cpu(cpu) {
|
||||
#ifndef CONFIG_NEED_MULTIPLE_NODES
|
||||
@@ -169,22 +167,15 @@ void __init setup_per_cpu_areas(void)
|
||||
if (!node_online(node) || !NODE_DATA(node)) {
|
||||
ptr = __alloc_bootmem(size, align,
|
||||
__pa(MAX_DMA_ADDRESS));
|
||||
printk(KERN_INFO
|
||||
"cpu %d has no node %d or node-local memory\n",
|
||||
pr_info("cpu %d has no node %d or node-local memory\n",
|
||||
cpu, node);
|
||||
if (ptr)
|
||||
printk(KERN_DEBUG
|
||||
"per cpu data for cpu%d at %016lx\n",
|
||||
cpu, __pa(ptr));
|
||||
}
|
||||
else {
|
||||
pr_debug("per cpu data for cpu%d at %016lx\n",
|
||||
cpu, __pa(ptr));
|
||||
} else {
|
||||
ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
|
||||
__pa(MAX_DMA_ADDRESS));
|
||||
if (ptr)
|
||||
printk(KERN_DEBUG
|
||||
"per cpu data for cpu%d on node%d "
|
||||
"at %016lx\n",
|
||||
cpu, node, __pa(ptr));
|
||||
pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
|
||||
cpu, node, __pa(ptr));
|
||||
}
|
||||
#endif
|
||||
per_cpu_offset(cpu) = ptr - __per_cpu_start;
|
||||
@@ -339,25 +330,25 @@ static const cpumask_t cpu_mask_none;
|
||||
/*
|
||||
* Returns a pointer to the bitmask of CPUs on Node 'node'.
|
||||
*/
|
||||
const cpumask_t *_node_to_cpumask_ptr(int node)
|
||||
const cpumask_t *cpumask_of_node(int node)
|
||||
{
|
||||
if (node_to_cpumask_map == NULL) {
|
||||
printk(KERN_WARNING
|
||||
"_node_to_cpumask_ptr(%d): no node_to_cpumask_map!\n",
|
||||
"cpumask_of_node(%d): no node_to_cpumask_map!\n",
|
||||
node);
|
||||
dump_stack();
|
||||
return (const cpumask_t *)&cpu_online_map;
|
||||
}
|
||||
if (node >= nr_node_ids) {
|
||||
printk(KERN_WARNING
|
||||
"_node_to_cpumask_ptr(%d): node > nr_node_ids(%d)\n",
|
||||
"cpumask_of_node(%d): node > nr_node_ids(%d)\n",
|
||||
node, nr_node_ids);
|
||||
dump_stack();
|
||||
return &cpu_mask_none;
|
||||
}
|
||||
return &node_to_cpumask_map[node];
|
||||
}
|
||||
EXPORT_SYMBOL(_node_to_cpumask_ptr);
|
||||
EXPORT_SYMBOL(cpumask_of_node);
|
||||
|
||||
/*
|
||||
* Returns a bitmask of CPUs on Node 'node'.
|
||||
|
@@ -496,7 +496,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
|
||||
}
|
||||
|
||||
/* maps the cpu to the sched domain representing multi-core */
|
||||
cpumask_t cpu_coregroup_map(int cpu)
|
||||
const struct cpumask *cpu_coregroup_mask(int cpu)
|
||||
{
|
||||
struct cpuinfo_x86 *c = &cpu_data(cpu);
|
||||
/*
|
||||
@@ -504,9 +504,14 @@ cpumask_t cpu_coregroup_map(int cpu)
|
||||
* And for power savings, we return cpu_core_map
|
||||
*/
|
||||
if (sched_mc_power_savings || sched_smt_power_savings)
|
||||
return per_cpu(cpu_core_map, cpu);
|
||||
return &per_cpu(cpu_core_map, cpu);
|
||||
else
|
||||
return c->llc_shared_map;
|
||||
return &c->llc_shared_map;
|
||||
}
|
||||
|
||||
cpumask_t cpu_coregroup_map(int cpu)
|
||||
{
|
||||
return *cpu_coregroup_mask(cpu);
|
||||
}
|
||||
|
||||
static void impress_friends(void)
|
||||
@@ -1149,7 +1154,7 @@ static void __init smp_cpu_index_default(void)
|
||||
for_each_possible_cpu(i) {
|
||||
c = &cpu_data(i);
|
||||
/* mark all to hotplug */
|
||||
c->cpu_index = NR_CPUS;
|
||||
c->cpu_index = nr_cpu_ids;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1293,6 +1298,8 @@ __init void prefill_possible_map(void)
|
||||
else
|
||||
possible = setup_possible_cpus;
|
||||
|
||||
total_cpus = max_t(int, possible, num_processors + disabled_cpus);
|
||||
|
||||
if (possible > CONFIG_NR_CPUS) {
|
||||
printk(KERN_WARNING
|
||||
"%d Processors exceeds NR_CPUS limit of %d\n",
|
||||
|
@@ -582,7 +582,6 @@ static int __init uv_ptc_init(void)
|
||||
static struct bau_control * __init uv_table_bases_init(int blade, int node)
|
||||
{
|
||||
int i;
|
||||
int *ip;
|
||||
struct bau_msg_status *msp;
|
||||
struct bau_control *bau_tabp;
|
||||
|
||||
@@ -599,13 +598,6 @@ static struct bau_control * __init uv_table_bases_init(int blade, int node)
|
||||
bau_cpubits_clear(&msp->seen_by, (int)
|
||||
uv_blade_nr_possible_cpus(blade));
|
||||
|
||||
bau_tabp->watching =
|
||||
kmalloc_node(sizeof(int) * DEST_NUM_RESOURCES, GFP_KERNEL, node);
|
||||
BUG_ON(!bau_tabp->watching);
|
||||
|
||||
for (i = 0, ip = bau_tabp->watching; i < DEST_Q_SIZE; i++, ip++)
|
||||
*ip = 0;
|
||||
|
||||
uv_bau_table_bases[blade] = bau_tabp;
|
||||
|
||||
return bau_tabp;
|
||||
@@ -628,7 +620,6 @@ uv_table_bases_finish(int blade, int node, int cur_cpu,
|
||||
bcp->bau_msg_head = bau_tablesp->va_queue_first;
|
||||
bcp->va_queue_first = bau_tablesp->va_queue_first;
|
||||
bcp->va_queue_last = bau_tablesp->va_queue_last;
|
||||
bcp->watching = bau_tablesp->watching;
|
||||
bcp->msg_statuses = bau_tablesp->msg_statuses;
|
||||
bcp->descriptor_base = adp;
|
||||
}
|
||||
|
@@ -292,8 +292,10 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
|
||||
tsk->thread.error_code = error_code;
|
||||
tsk->thread.trap_no = 8;
|
||||
|
||||
/* This is always a kernel trap and never fixable (and thus must
|
||||
never return). */
|
||||
/*
|
||||
* This is always a kernel trap and never fixable (and thus must
|
||||
* never return).
|
||||
*/
|
||||
for (;;)
|
||||
die(str, regs, error_code);
|
||||
}
|
||||
@@ -520,9 +522,11 @@ dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
|
||||
}
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
/* Help handler running on IST stack to switch back to user stack
|
||||
for scheduling or signal handling. The actual stack switch is done in
|
||||
entry.S */
|
||||
/*
|
||||
* Help handler running on IST stack to switch back to user stack
|
||||
* for scheduling or signal handling. The actual stack switch is done in
|
||||
* entry.S
|
||||
*/
|
||||
asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
|
||||
{
|
||||
struct pt_regs *regs = eregs;
|
||||
@@ -532,8 +536,10 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
|
||||
/* Exception from user space */
|
||||
else if (user_mode(eregs))
|
||||
regs = task_pt_regs(current);
|
||||
/* Exception from kernel and interrupts are enabled. Move to
|
||||
kernel process stack. */
|
||||
/*
|
||||
* Exception from kernel and interrupts are enabled. Move to
|
||||
* kernel process stack.
|
||||
*/
|
||||
else if (eregs->flags & X86_EFLAGS_IF)
|
||||
regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
|
||||
if (eregs != regs)
|
||||
@@ -685,12 +691,7 @@ void math_error(void __user *ip)
|
||||
cwd = get_fpu_cwd(task);
|
||||
swd = get_fpu_swd(task);
|
||||
|
||||
err = swd & ~cwd & 0x3f;
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
if (!err)
|
||||
return;
|
||||
#endif
|
||||
err = swd & ~cwd;
|
||||
|
||||
if (err & 0x001) { /* Invalid op */
|
||||
/*
|
||||
@@ -708,7 +709,11 @@ void math_error(void __user *ip)
|
||||
} else if (err & 0x020) { /* Precision */
|
||||
info.si_code = FPE_FLTRES;
|
||||
} else {
|
||||
info.si_code = __SI_FAULT|SI_KERNEL; /* WTF? */
|
||||
/*
|
||||
* If we're using IRQ 13, or supposedly even some trap 16
|
||||
* implementations, it's possible we get a spurious trap...
|
||||
*/
|
||||
return; /* Spurious trap, no error */
|
||||
}
|
||||
force_sig_info(SIGFPE, &info, task);
|
||||
}
@@ -159,7 +159,7 @@ int save_i387_xstate(void __user *buf)
* Restore the extended state if present. Otherwise, restore the FP/SSE
* state.
*/
int restore_user_xstate(void __user *buf)
static int restore_user_xstate(void __user *buf)
{
struct _fpx_sw_bytes fx_sw_user;
u64 mask;