Merge branch 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86: Fix keeping track of AMD C1E
  x86, cpu: Package Level Thermal Control, Power Limit Notification definitions
  x86, cpu: Export AMD errata definitions
  x86, cpu: Use AMD errata checking framework for erratum 383
  x86, cpu: Clean up AMD erratum 400 workaround
  x86, cpu: AMD errata checking framework
  x86, cpu: Split addon_cpuid_features.c
  x86, cpu: Clean up formatting in cpufeature.h, remove override
  x86, cpu: Enumerate xsaveopt
  x86, cpu: Add xsaveopt cpufeature
  x86, cpu: Make init_scattered_cpuid_features() consider cpuid subleaves
  x86, cpu: Support the features flags in new CPUID leaf 7
  x86, cpu: Add CPU flags for F16C and RDRND
  x86: Look for IA32_ENERGY_PERF_BIAS support
  x86, AMD: Extend support to future families
  x86, cacheinfo: Carve out L3 cache slot accessors
  x86, xsave: Cleanup return codes in check_for_xstate()
arch/x86/kernel/cpu/Makefile
@@ -12,7 +12,7 @@ endif
 nostackp := $(call cc-option, -fno-stack-protector)
 CFLAGS_common.o		:= $(nostackp)
 
-obj-y			:= intel_cacheinfo.o addon_cpuid_features.o
+obj-y			:= intel_cacheinfo.o scattered.o topology.o
 obj-y			+= proc.o capflags.o powerflags.o common.o
 obj-y			+= vmware.o hypervisor.o sched.o mshyperv.o
 
arch/x86/kernel/cpu/amd.c
@@ -466,7 +466,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 		}
 	}
 
-	if (c->x86 == 0x10 || c->x86 == 0x11)
+	if (c->x86 >= 0x10)
 		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
 
 	/* get apicid instead of initial apic id from cpuid */
@@ -529,7 +529,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 		num_cache_leaves = 3;
 	}
 
-	if (c->x86 >= 0xf && c->x86 <= 0x11)
+	if (c->x86 >= 0xf)
 		set_cpu_cap(c, X86_FEATURE_K8);
 
 	if (cpu_has_xmm2) {
@@ -546,7 +546,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 		fam10h_check_enable_mmcfg();
 	}
 
-	if (c == &boot_cpu_data && c->x86 >= 0xf && c->x86 <= 0x11) {
+	if (c == &boot_cpu_data && c->x86 >= 0xf) {
 		unsigned long long tseg;
 
 		/*
@@ -609,3 +609,74 @@ static const struct cpu_dev __cpuinitconst amd_cpu_dev = {
 };
 
 cpu_dev_register(amd_cpu_dev);
+
+/*
+ * AMD errata checking
+ *
+ * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
+ * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
+ * have an OSVW id assigned, which it takes as first argument. Both take a
+ * variable number of family-specific model-stepping ranges created by
+ * AMD_MODEL_RANGE(). Each erratum also has to be declared as extern const
+ * int[] in arch/x86/include/asm/processor.h.
+ *
+ * Example:
+ *
+ * const int amd_erratum_319[] =
+ *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
+ *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
+ *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
+ */
+
+const int amd_erratum_400[] =
+	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
+			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
+EXPORT_SYMBOL_GPL(amd_erratum_400);
+
+const int amd_erratum_383[] =
+	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
+EXPORT_SYMBOL_GPL(amd_erratum_383);
+
+bool cpu_has_amd_erratum(const int *erratum)
+{
+	struct cpuinfo_x86 *cpu = &current_cpu_data;
+	int osvw_id = *erratum++;
+	u32 range;
+	u32 ms;
+
+	/*
+	 * If called early enough that current_cpu_data hasn't been initialized
+	 * yet, fall back to boot_cpu_data.
+	 */
+	if (cpu->x86 == 0)
+		cpu = &boot_cpu_data;
+
+	if (cpu->x86_vendor != X86_VENDOR_AMD)
+		return false;
+
+	if (osvw_id >= 0 && osvw_id < 65536 &&
+	    cpu_has(cpu, X86_FEATURE_OSVW)) {
+		u64 osvw_len;
+
+		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
+		if (osvw_id < osvw_len) {
+			u64 osvw_bits;
+
+			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
+			       osvw_bits);
+			return osvw_bits & (1ULL << (osvw_id & 0x3f));
+		}
+	}
+
+	/* OSVW unavailable or ID unknown, match family-model-stepping range */
+	ms = (cpu->x86_model << 8) | cpu->x86_mask;
+	while ((range = *erratum++))
+		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
+		    (ms >= AMD_MODEL_RANGE_START(range)) &&
+		    (ms <= AMD_MODEL_RANGE_END(range)))
+			return true;
+
+	return false;
+}
+
+EXPORT_SYMBOL_GPL(cpu_has_amd_erratum);
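The point of the framework is that callers no longer open-code family/model checks. Below is a minimal sketch of the intended usage, assuming a caller along the lines of the erratum-400 (C1E) workaround; the function name here is hypothetical and the real call sites live in files not shown in this diff:

	#include <linux/kernel.h>
	#include <asm/processor.h>	/* declares amd_erratum_400 and
					 * cpu_has_amd_erratum() */

	/* hypothetical caller, for illustration only */
	static void check_c1e_workaround(void)
	{
		/*
		 * Consults the OSVW MSRs first (OSVW id 1 for erratum 400);
		 * if OSVW is unavailable, falls back to matching the
		 * family/model/stepping ranges in amd_erratum_400[].
		 */
		if (cpu_has_amd_erratum(amd_erratum_400))
			pr_info("AMD erratum 400 present, using C1E-aware idle\n");
	}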
arch/x86/kernel/cpu/common.c
@@ -551,6 +551,16 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
 		c->x86_capability[4] = excap;
 	}
 
+	/* Additional Intel-defined flags: level 0x00000007 */
+	if (c->cpuid_level >= 0x00000007) {
+		u32 eax, ebx, ecx, edx;
+
+		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
+
+		if (eax > 0)
+			c->x86_capability[9] = ebx;
+	}
+
 	/* AMD-defined flags: level 0x80000001 */
 	xlvl = cpuid_eax(0x80000000);
 	c->extended_cpuid_level = xlvl;
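Capability word 9 now carries CPUID leaf 7 (sub-leaf 0) EBX, so individual leaf-7 features can be defined with the usual word*32+bit encoding. A sketch of how such a flag is declared and tested, assuming the cpufeature.h convention (FSGSBASE is the first leaf-7 bit this series defines; treat the exact line as illustrative):

	/* in cpufeature.h: word 9 = CPUID leaf 0x00000007 (ecx=0), EBX */
	#define X86_FEATURE_FSGSBASE	(9*32+ 0) /* {RD,WR}{FS,GS}BASE */

	/* a leaf-7 flag is then tested like any other capability bit */
	static int cpu_supports_fsgsbase(struct cpuinfo_x86 *c)
	{
		return cpu_has(c, X86_FEATURE_FSGSBASE);
	}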
arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -347,8 +347,8 @@ static struct amd_l3_cache * __cpuinit amd_init_l3_cache(int node)
 	return l3;
 }
 
-static void __cpuinit
-amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
+static void __cpuinit amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf,
+					   int index)
 {
 	int node;
 
@@ -396,20 +396,39 @@ amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
 	this_leaf->l3 = l3_caches[node];
 }
 
+/*
+ * check whether a slot used for disabling an L3 index is occupied.
+ * @l3: L3 cache descriptor
+ * @slot: slot number (0..1)
+ *
+ * @returns: the disabled index if used or negative value if slot free.
+ */
+int amd_get_l3_disable_slot(struct amd_l3_cache *l3, unsigned slot)
+{
+	unsigned int reg = 0;
+
+	pci_read_config_dword(l3->dev, 0x1BC + slot * 4, &reg);
+
+	/* check whether this slot is activated already */
+	if (reg & (3UL << 30))
+		return reg & 0xfff;
+
+	return -1;
+}
+
 static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
 				  unsigned int slot)
 {
-	struct pci_dev *dev = this_leaf->l3->dev;
-	unsigned int reg = 0;
+	int index;
 
 	if (!this_leaf->l3 || !this_leaf->l3->can_disable)
 		return -EINVAL;
 
-	if (!dev)
-		return -EINVAL;
+	index = amd_get_l3_disable_slot(this_leaf->l3, slot);
+	if (index >= 0)
+		return sprintf(buf, "%d\n", index);
 
-	pci_read_config_dword(dev, 0x1BC + slot * 4, &reg);
-	return sprintf(buf, "0x%08x\n", reg);
+	return sprintf(buf, "FREE\n");
 }
 
 #define SHOW_CACHE_DISABLE(slot)					\
@@ -451,37 +470,74 @@ static void amd_l3_disable_index(struct amd_l3_cache *l3, int cpu,
 	}
 }
 
-
-static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
-				  const char *buf, size_t count,
-				  unsigned int slot)
+/*
+ * disable an L3 cache index by using a disable-slot
+ *
+ * @l3:    L3 cache descriptor
+ * @cpu:   A CPU on the node containing the L3 cache
+ * @slot:  slot number (0..1)
+ * @index: index to disable
+ *
+ * @return: 0 on success, error status on failure
+ */
+int amd_set_l3_disable_slot(struct amd_l3_cache *l3, int cpu, unsigned slot,
+			    unsigned long index)
 {
-	struct pci_dev *dev = this_leaf->l3->dev;
-	int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
-	unsigned long val = 0;
+	int ret = 0;
 
 #define SUBCACHE_MASK	(3UL << 20)
 #define SUBCACHE_INDEX	0xfff
 
-	if (!this_leaf->l3 || !this_leaf->l3->can_disable)
+	/*
+	 * check whether this slot is already used or
+	 * the index is already disabled
+	 */
+	ret = amd_get_l3_disable_slot(l3, slot);
+	if (ret >= 0)
 		return -EINVAL;
 
+	/*
+	 * check whether the other slot has disabled the
+	 * same index already
+	 */
+	if (index == amd_get_l3_disable_slot(l3, !slot))
+		return -EINVAL;
+
+	/* do not allow writes outside of allowed bits */
+	if ((index & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) ||
+	    ((index & SUBCACHE_INDEX) > l3->indices))
+		return -EINVAL;
+
+	amd_l3_disable_index(l3, cpu, slot, index);
+
+	return 0;
+}
+
+static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
+				  const char *buf, size_t count,
+				  unsigned int slot)
+{
+	unsigned long val = 0;
+	int cpu, err = 0;
+
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
-	if (!dev)
+	if (!this_leaf->l3 || !this_leaf->l3->can_disable)
 		return -EINVAL;
 
+	cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
+
 	if (strict_strtoul(buf, 10, &val) < 0)
 		return -EINVAL;
 
-	/* do not allow writes outside of allowed bits */
-	if ((val & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) ||
-	    ((val & SUBCACHE_INDEX) > this_leaf->l3->indices))
-		return -EINVAL;
-
-	amd_l3_disable_index(this_leaf->l3, cpu, slot, val);
-
+	err = amd_set_l3_disable_slot(this_leaf->l3, cpu, slot, val);
+	if (err) {
+		if (err == -EEXIST)
+			printk(KERN_WARNING "L3 disable slot %d in use!\n",
+			       slot);
+		return err;
+	}
 	return count;
 }
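Carving these accessors out of the sysfs store/show path gives other kernel code a direct way to retire a failing L3 cache index. A hedged sketch of such a consumer (the function below is hypothetical; the intended in-tree user is the MCE code, which is not part of this diff):

	#include <linux/errno.h>

	/* hypothetical consumer of the carved-out slot accessors */
	static int retire_l3_index(struct amd_l3_cache *l3, int cpu,
				   unsigned long bad_index)
	{
		unsigned int slot;

		for (slot = 0; slot < 2; slot++) {
			/* a negative return means the slot is still free */
			if (amd_get_l3_disable_slot(l3, slot) < 0)
				return amd_set_l3_disable_slot(l3, cpu, slot,
							       bad_index);
		}

		return -EBUSY;	/* both disable slots already occupied */
	}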
@@ -502,7 +558,7 @@ static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
 
 #else	/* CONFIG_CPU_SUP_AMD */
 static void __cpuinit
-amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
+amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf, int index)
 {
 };
 #endif /* CONFIG_CPU_SUP_AMD */
@@ -518,7 +574,7 @@ __cpuinit cpuid4_cache_lookup_regs(int index,
 
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
 		amd_cpuid4(index, &eax, &ebx, &ecx);
-		amd_check_l3_disable(index, this_leaf);
+		amd_check_l3_disable(this_leaf, index);
 	} else {
 		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
 	}
arch/x86/kernel/cpu/scattered.c (new file, 63 lines)
@@ -0,0 +1,63 @@
+/*
+ * Routines to identify additional cpu features that are scattered in
+ * cpuid space.
+ */
+#include <linux/cpu.h>
+
+#include <asm/pat.h>
+#include <asm/processor.h>
+
+#include <asm/apic.h>
+
+struct cpuid_bit {
+	u16 feature;
+	u8 reg;
+	u8 bit;
+	u32 level;
+	u32 sub_leaf;
+};
+
+enum cpuid_regs {
+	CR_EAX = 0,
+	CR_ECX,
+	CR_EDX,
+	CR_EBX
+};
+
+void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
+{
+	u32 max_level;
+	u32 regs[4];
+	const struct cpuid_bit *cb;
+
+	static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
+		{ X86_FEATURE_IDA,		CR_EAX, 1, 0x00000006, 0 },
+		{ X86_FEATURE_ARAT,		CR_EAX, 2, 0x00000006, 0 },
+		{ X86_FEATURE_PLN,		CR_EAX, 4, 0x00000006, 0 },
+		{ X86_FEATURE_PTS,		CR_EAX, 6, 0x00000006, 0 },
+		{ X86_FEATURE_APERFMPERF,	CR_ECX, 0, 0x00000006, 0 },
+		{ X86_FEATURE_EPB,		CR_ECX, 3, 0x00000006, 0 },
+		{ X86_FEATURE_XSAVEOPT,		CR_EAX, 0, 0x0000000d, 1 },
+		{ X86_FEATURE_CPB,		CR_EDX, 9, 0x80000007, 0 },
+		{ X86_FEATURE_NPT,		CR_EDX, 0, 0x8000000a, 0 },
+		{ X86_FEATURE_LBRV,		CR_EDX, 1, 0x8000000a, 0 },
+		{ X86_FEATURE_SVML,		CR_EDX, 2, 0x8000000a, 0 },
+		{ X86_FEATURE_NRIPS,		CR_EDX, 3, 0x8000000a, 0 },
+		{ 0, 0, 0, 0, 0 }
+	};
+
+	for (cb = cpuid_bits; cb->feature; cb++) {
+
+		/* Verify that the level is valid */
+		max_level = cpuid_eax(cb->level & 0xffff0000);
+		if (max_level < cb->level ||
+		    max_level > (cb->level | 0xffff))
+			continue;
+
+		cpuid_count(cb->level, cb->sub_leaf, &regs[CR_EAX],
+			    &regs[CR_EBX], &regs[CR_ECX], &regs[CR_EDX]);
+
+		if (regs[cb->reg] & (1 << cb->bit))
+			set_cpu_cap(c, cb->feature);
+	}
+}
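The new sub_leaf field is what makes the XSAVEOPT entry above work: XSAVEOPT is reported in CPUID leaf 0xd, sub-leaf 1, EAX bit 0, which a plain cpuid() read of leaf 0xd would miss. A standalone sketch of the same check, with names of my own choosing:

	static int has_xsaveopt(void)
	{
		u32 eax, ebx, ecx, edx;

		/* leaf 0xd, sub-leaf 1: EAX bit 0 enumerates XSAVEOPT */
		cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);

		return eax & 1;
	}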
arch/x86/kernel/cpu/topology.c (renamed from arch/x86/kernel/cpu/addon_cpuid_features.c)
@@ -1,62 +1,14 @@
 /*
- * Routines to identify additional cpu features that are scattered in
- * cpuid space.
+ * Check for extended topology enumeration cpuid leaf 0xb and if it
+ * exists, use it for populating initial_apicid and cpu topology
+ * detection.
  */
-#include <linux/cpu.h>
 
+#include <linux/cpu.h>
+#include <asm/apic.h>
 #include <asm/pat.h>
 #include <asm/processor.h>
 
-#include <asm/apic.h>
-
-struct cpuid_bit {
-	u16 feature;
-	u8 reg;
-	u8 bit;
-	u32 level;
-};
-
-enum cpuid_regs {
-	CR_EAX = 0,
-	CR_ECX,
-	CR_EDX,
-	CR_EBX
-};
-
-void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
-{
-	u32 max_level;
-	u32 regs[4];
-	const struct cpuid_bit *cb;
-
-	static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
-		{ X86_FEATURE_IDA,		CR_EAX, 1, 0x00000006 },
-		{ X86_FEATURE_ARAT,		CR_EAX, 2, 0x00000006 },
-		{ X86_FEATURE_APERFMPERF,	CR_ECX, 0, 0x00000006 },
-		{ X86_FEATURE_CPB,		CR_EDX, 9, 0x80000007 },
-		{ X86_FEATURE_NPT,		CR_EDX, 0, 0x8000000a },
-		{ X86_FEATURE_LBRV,		CR_EDX, 1, 0x8000000a },
-		{ X86_FEATURE_SVML,		CR_EDX, 2, 0x8000000a },
-		{ X86_FEATURE_NRIPS,		CR_EDX, 3, 0x8000000a },
-		{ 0, 0, 0, 0 }
-	};
-
-	for (cb = cpuid_bits; cb->feature; cb++) {
-
-		/* Verify that the level is valid */
-		max_level = cpuid_eax(cb->level & 0xffff0000);
-		if (max_level < cb->level ||
-		    max_level > (cb->level | 0xffff))
-			continue;
-
-		cpuid(cb->level, &regs[CR_EAX], &regs[CR_EBX],
-		      &regs[CR_ECX], &regs[CR_EDX]);
-
-		if (regs[cb->reg] & (1 << cb->bit))
-			set_cpu_cap(c, cb->feature);
-	}
-}
-
 /* leaf 0xb SMT level */
 #define SMT_LEVEL	0
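The rest of topology.c (not shown in this hunk) is the leaf-0xb enumeration itself. For orientation, a sketch of how the SMT_LEVEL sub-leaf defined above is typically consumed, assuming the standard leaf-0xb semantics (sub-leaf 0 EDX returns the CPU's x2APIC ID):

	static unsigned int leaf_0xb_initial_apicid(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/* sub-leaf 0 (SMT_LEVEL): EDX carries the x2APIC ID */
		cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);

		return edx;
	}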