author	Andi Kleen <ak@linux.intel.com>	2012-01-26 03:09:06 +0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2012-01-27 04:48:10 +0400
commit	3bd391f056df61e928de1680ff4a3e7e07e5b399 (patch)
tree	f50b5c80010b901a6542b7f4e4913b76d77f3b72 /drivers/crypto/padlock-sha.c
parent	644e9cbbe3fc032cc92d0936057e166a994dc246 (diff)
download	linux-3bd391f056df61e928de1680ff4a3e7e07e5b399.tar.xz
crypto: Add support for x86 cpuid auto loading for x86 crypto drivers
Add support for auto-loading of crypto drivers based on cpuid features. This enables auto-loading of the VIA and Intel specific drivers for AES, hashing and CRCs.

Requires the earlier infrastructure patch to add x86 modinfo. I kept it all in a single patch for now.

I dropped the printks when the driver cpuid doesn't match (imho drivers never should print anything in such a case).

One drawback is that udev doesn't know if the drivers are used or not, so they will be unconditionally loaded at boot up. That's better than not loading them at all, as often happens.

Cc: Dave Jones <davej@redhat.com>
Cc: Kay Sievers <kay.sievers@vrfy.org>
Cc: Jen Axboe <axboe@kernel.dk>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Huang Ying <ying.huang@intel.com>
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Thomas Renninger <trenn@suse.de>
Acked-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
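For reference, the pattern this patch applies to each driver looks roughly like the minimal sketch below. The module and identifier names (example_cpu_ids, example_init, example_exit) are illustrative and not taken from the patch; the real drivers match their own feature flags, such as X86_FEATURE_PHE in padlock-sha or X86_FEATURE_AES in the AES-NI driver.

	#include <linux/module.h>
	#include <asm/cpu_device_id.h>

	/* Feature flags this (hypothetical) module cares about.
	 * MODULE_DEVICE_TABLE exports the table via modinfo so udev
	 * can auto-load the module on CPUs that advertise the flag. */
	static const struct x86_cpu_id example_cpu_ids[] = {
		X86_FEATURE_MATCH(X86_FEATURE_AES),
		{}
	};
	MODULE_DEVICE_TABLE(x86cpu, example_cpu_ids);

	static int __init example_init(void)
	{
		/* Still check at init time, in case the module was
		 * loaded manually on a CPU without the feature. */
		if (!x86_match_cpu(example_cpu_ids))
			return -ENODEV;
		return 0;
	}

	static void __exit example_exit(void)
	{
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");

As the commit message notes, the trade-off is that the table matches on the CPU feature alone, so udev loads the module whether or not its algorithms end up being used.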
Diffstat (limited to 'drivers/crypto/padlock-sha.c')
-rw-r--r--	drivers/crypto/padlock-sha.c	16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 06bdb4b2c6a6..9266c0e25492 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -22,6 +22,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
+#include <asm/cpu_device_id.h>
#include <asm/i387.h>
struct padlock_sha_desc {
@@ -526,6 +527,12 @@ static struct shash_alg sha256_alg_nano = {
}
};
+static struct x86_cpu_id padlock_sha_ids[] = {
+ X86_FEATURE_MATCH(X86_FEATURE_PHE),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, padlock_sha_ids);
+
static int __init padlock_init(void)
{
int rc = -ENODEV;
@@ -533,15 +540,8 @@ static int __init padlock_init(void)
struct shash_alg *sha1;
struct shash_alg *sha256;
- if (!cpu_has_phe) {
- printk(KERN_NOTICE PFX "VIA PadLock Hash Engine not detected.\n");
- return -ENODEV;
- }
-
- if (!cpu_has_phe_enabled) {
- printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
+ if (!x86_match_cpu(padlock_sha_ids) || !cpu_has_phe_enabled)
return -ENODEV;
- }
/* Register the newly added algorithm module if on *
* VIA Nano processor, or else just do as before */