
x86: L3 cache index disable for 2.6.26

From:  Mark Langsdorf <mark.langsdorf@amd.com>
To:  Greg KH <greg@kroah.com>
Subject:  [PATCH 01/01][retry 2] x86: L3 cache index disable for 2.6.26
Date:  Wed, 13 Aug 2008 15:02:42 -0500
Message-ID:  <200808131502.42611.mark.langsdorf@amd.com>
Cc:  Pavel Machek <pavel@suse.cz>, joachim.deguara@amd.com, gregkh@ucw.cz, tglx@linutronix.de, mingo@redhat.com, hpa@zytor.com, linux-kernel@vger.kernel.org
Archive-link:  Article

New versions of AMD processors can disable parts of their
L3 cache if the cache generates too many machine-check
exceptions (MCEs).

This patch provides a sysfs interface under the cache
hierarchy to display which cache indices are disabled
(if any) and to allow monitoring applications to disable
a cache index.

This patch does not set an automatic policy to disable
the L3 cache.  Policy decisions would need to be made
by a RAS handler.  This patch merely makes it easier to
see which indices are currently disabled.
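
As an illustration, here is a minimal user-space sketch of how a
monitoring or RAS tool might use these attributes.  The cpu0/index3
path and the example index value 0x10 are assumptions made for the
sake of the example; the actual cache index directory and the index
to disable depend on the system and on which index reports errors.

#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/devices/system/cpu/cpu0/cache/index3/cache_disable_0";
	unsigned int reg;
	FILE *f;

	/* Read back the current value of the cache disable register. */
	f = fopen(path, "r");
	if (!f || fscanf(f, "%x", &reg) != 1) {
		perror("read cache_disable_0");
		return 1;
	}
	fclose(f);
	printf("cache_disable_0 = %#x\n", reg);

	/* Disable a (hypothetical) faulty cache index; the store
	 * handler parses the value with sscanf(buf, "%x", ...).
	 * Writing requires root. */
	f = fopen(path, "w");
	if (!f) {
		perror("open cache_disable_0 for write");
		return 1;
	}
	fprintf(f, "%x\n", 0x10);
	fclose(f);

	return 0;
}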

Signed-off-by: Mark Langsdorf <mark.langsdorf@amd.com>

diff -r e683983d4dd0 Documentation/ABI/testing/sysfs-devices-cache_disable
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Documentation/ABI/testing/sysfs-devices-cache_disable	Wed Aug 13 09:06:52 2008 -0500
@@ -0,0 +1,18 @@
+What:           /sys/devices/system/cpu/cpu*/cache/index*/cache_disable_X
+Date:           August 2008
+KernelVersion:  2.6.27
+Contact:        osrc-kernel@elbe.amd.com
+Description:    These files exist in every CPU's cache index directory.
+		There are currently 2 cache_disable_# files in each
+		directory.  Reading from these files on a supported
+		processor will return the cache disable index value
+		for that processor and node.  Writing to one of these
+		files will cause the specified cache index to be disabled.
+
+		Currently, only AMD Family 10h processors support cache index
+		disable, and only for their L3 caches.  See the BIOS and
+		Kernel Developer's Guide at
+		http://www.amd.com/us-en/assets/content_type/white_papers...
+		for formatting information and other details on
+		cache index disable.
+Users:          joachim.deguara@amd.com
diff -r e683983d4dd0 arch/x86/kernel/cpu/intel_cacheinfo.c
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c	Tue Aug 12 08:46:38 2008 -0500
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c	Wed Aug 13 08:47:59 2008 -0500
@@ -130,6 +130,7 @@ struct _cpuid4_info {
 	union _cpuid4_leaf_ebx ebx;
 	union _cpuid4_leaf_ecx ecx;
 	unsigned long size;
+	unsigned long can_disable;
 	cpumask_t shared_cpu_map;	/* future?: only cpus/node is needed */
 };
 
@@ -251,6 +252,14 @@ static void __cpuinit amd_cpuid4(int lea
 		(ebx->split.ways_of_associativity + 1) - 1;
 }
 
+static void __cpuinit
+amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
+{
+	if (index < 3)
+		return;
+	this_leaf->can_disable = 1;
+}
+
 static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
 {
 	union _cpuid4_leaf_eax 	eax;
@@ -258,10 +267,13 @@ static int __cpuinit cpuid4_cache_lookup
 	union _cpuid4_leaf_ecx 	ecx;
 	unsigned		edx;
 
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
 		amd_cpuid4(index, &eax, &ebx, &ecx);
-	else
-		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full,  &edx);
+		if (boot_cpu_data.x86 >= 0x10)
+			amd_check_l3_disable(index, this_leaf);
+	} else {
+		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
+	}
 	if (eax.split.type == CACHE_TYPE_NULL)
 		return -EIO; /* better error ? */
 
@@ -269,9 +281,9 @@ static int __cpuinit cpuid4_cache_lookup
 	this_leaf->ebx = ebx;
 	this_leaf->ecx = ecx;
 	this_leaf->size = (ecx.split.number_of_sets + 1) *
-		(ebx.split.coherency_line_size + 1) *
-		(ebx.split.physical_line_partition + 1) *
-		(ebx.split.ways_of_associativity + 1);
+			(ebx.split.coherency_line_size + 1) *
+			(ebx.split.physical_line_partition + 1) *
+			(ebx.split.ways_of_associativity + 1);
 	return 0;
 }
 
@@ -574,6 +586,9 @@ static DEFINE_PER_CPU(struct _index_kobj
 static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
 #define INDEX_KOBJECT_PTR(x, y)    (&((per_cpu(index_kobject, x))[y]))
 
+#define to_object(k)    container_of(k, struct _index_kobject, kobj)
+#define to_attr(a)      container_of(a, struct _cache_attr, attr)
+
 #define show_one_plus(file_name, object, val)				\
 static ssize_t show_##file_name						\
 			(struct _cpuid4_info *this_leaf, char *buf)	\
@@ -619,6 +634,92 @@ static inline ssize_t show_shared_cpu_li
 {
 	return show_shared_cpu_map_func(leaf, 1, buf);
 }
+
+#if defined(CONFIG_PCI) && defined(CONFIG_K8_NB)
+#include <linux/pci.h>
+#include <asm/k8.h>
+static struct pci_dev *get_k8_northbridge(int node)
+{
+	return k8_northbridges[node];
+}
+#else
+static inline int pci_write_config_dword(struct pci_dev *dev, int where,
+                                         u32 val)
+{
+	return 0;
+}
+
+static inline int pci_read_config_dword(struct pci_dev *dev, int where,
+                                        u32 *val)
+{
+	return 0;
+}
+
+static struct pci_dev *get_k8_northbridge(int node)
+{
+	return NULL;
+}
+#endif
+
+static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
+		unsigned int index)
+{
+	int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
+	struct pci_dev *dev = get_k8_northbridge(node);
+	unsigned int reg = 0;
+
+	if (!this_leaf->can_disable)
+		return 0;
+
+	pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
+	return sprintf(buf, "%x\n", reg);
+}
+
+#define SHOW_CACHE_DISABLE(index)					\
+static ssize_t								\
+show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf)	\
+{									\
+	return show_cache_disable(this_leaf, buf, index);		\
+}
+
+static ssize_t
+store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf,
+		size_t count, unsigned int index)
+{
+	int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
+	struct pci_dev *dev = get_k8_northbridge(node);
+	ssize_t ret = 0;
+	unsigned int val;
+
+	if (!this_leaf->can_disable)
+		return 0;
+
+	if (strlen(buf) > 10)
+		return -EINVAL;
+
+	ret = sscanf(buf, "%x\n", &val);
+	if (ret != 1)
+		return -EINVAL;
+
+	val |= 0xc0000000;
+	pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);
+	wbinvd();
+	pci_write_config_dword(dev, 0x1BC + index * 4, val);
+	return count;
+}
+
+#define STORE_CACHE_DISABLE(index)					\
+static ssize_t								\
+store_cache_disable_##index(struct _cpuid4_info *this_leaf, 		\
+		const char *buf, size_t count)				\
+{									\
+	return store_cache_disable(this_leaf, buf, count, index);	\
+}
+
+SHOW_CACHE_DISABLE(0)
+STORE_CACHE_DISABLE(0)
+SHOW_CACHE_DISABLE(1)
+STORE_CACHE_DISABLE(1)
 
 static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf) {
 	switch(this_leaf->eax.split.type) {
@@ -657,6 +758,10 @@ define_one_ro(shared_cpu_map);
 define_one_ro(shared_cpu_map);
 define_one_ro(shared_cpu_list);
 
+static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644, show_cache_disable_0, store_cache_disable_0);
+static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644, show_cache_disable_1, store_cache_disable_1);
+
+
 static struct attribute * default_attrs[] = {
 	&type.attr,
 	&level.attr,
@@ -667,11 +772,10 @@ static struct attribute * default_attrs[
 	&size.attr,
 	&shared_cpu_map.attr,
 	&shared_cpu_list.attr,
+	&cache_disable_0.attr,
+	&cache_disable_1.attr,
 	NULL
 };
-
-#define to_object(k) container_of(k, struct _index_kobject, kobj)
-#define to_attr(a) container_of(a, struct _cache_attr, attr)
 
 static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)
 {
@@ -689,7 +793,15 @@ static ssize_t store(struct kobject * ko
 static ssize_t store(struct kobject * kobj, struct attribute * attr,
 		     const char * buf, size_t count)
 {
-	return 0;
+	struct _cache_attr *fattr = to_attr(attr);
+	struct _index_kobject *this_leaf = to_object(kobj);
+	ssize_t ret;
+
+	ret = fattr->store ?
+		fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
+			 buf, count) :
+		0;
+	return ret;
 }
 
 static struct sysfs_ops sysfs_ops = {
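
The subtle part of the patch is the ordering of operations in
store_cache_disable().  As a reading aid, here is the same sequence
restated with comments; the interpretation of bits 31 and 30 is an
assumption inferred from the code, and the BKDG referenced in the
documentation file is the authority on the register layout.

static void l3_disable_index(struct pci_dev *dev, unsigned int index,
			     unsigned int val)
{
	/* val is the cache index to disable, as parsed from the
	 * sysfs write; bits 31 and 30 are control bits on top of it. */
	val |= 0xc0000000;

	/* First write: the index value with bit 30 still cleared. */
	pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);

	/* Flush and invalidate all caches, so that no dirty lines
	 * remain in the index that is about to be disabled. */
	wbinvd();

	/* Second write: the full value, completing the disable. */
	pci_write_config_dword(dev, 0x1BC + index * 4, val);
}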


